Mirror of https://github.com/restic/restic.git (synced 2025-08-25 19:37:35 +00:00)

Compare commits: v0.8.1...add-webdav (42 commits)
Commits:

0912a8db07, eefeb387d9, c7d789ab04, 92918ef1b6, f49f5c5903, d89f2e5226,
02f4f5dc66, 5723636b35, 5632ca4f44, 539599d1f1, d77a326bb0, 99f0fce673,
b708cfee3f, b8620429e2, aab414b368, e120b17abd, 5f43e17918, 489eef5a6f,
8c550ca011, 032621289b, 92316a9853, 180741609e, 70250762f3, 39ba17a2d6,
cfe8c8c9cd, b45fc89512, aabc0ccaa7, 2bc4d200d4, 0247fe01c0, c912b38bf0,
559946c58a, a99637c613, 36501dda73, 18ecd9df30, c686dd0448, 6d91d468e7,
5f9b5b0219, 3f7d85360a, 8d8456590c, 85f9f3e290, 9e1180a29b, e17c1096a0
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changed lines)

@@ -25,7 +25,7 @@ Link issues and relevant forum posts here.
 - [ ] I have read the [Contribution Guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches)
 - [ ] I have added tests for all changes in this PR
 - [ ] I have added documentation for the changes (in the manual)
-- [ ] There's an entry in the `CHANGELOG.md` file that describe the changes for our users
+- [ ] There's a new file in a subdir of `changelog/x.y.z` that describe the changes for our users (template [here](https://github.com/restic/restic/blob/master/changelog/changelog-entry.tmpl))
 - [ ] I have run `gofmt` on the code in all commits
 - [ ] All commit messages are formatted in the same style as [the other commits in the repo](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits)
 - [ ] I'm done, this Pull Request is ready for review
CHANGELOG.md (34 changed lines)

@@ -1,4 +1,4 @@
-Changelog for restic 0.8.1 (UNRELEASED)
+Changelog for restic 0.8.1 (2017-12-27)
 =======================================
 
 The following sections list the changes in restic 0.8.1 relevant to
@@ -9,7 +9,7 @@ Summary
 
 * Fix #1457: Improve s3 backend with DigitalOcean Spaces
 * Fix #1454: Correct cache dir location for Windows and Darwin
-* Fix #1457: Disable handling SIGPIPE
+* Fix #1459: Disable handling SIGPIPE
 * Chg #1452: Do not save atime by default
 * Enh #1436: Add code to detect old cache directories
 * Enh #1439: Improve cancellation logic
@@ -30,7 +30,7 @@ Details
 
 https://github.com/restic/restic/pull/1454
 
-* Bugfix #1457: Disable handling SIGPIPE
+* Bugfix #1459: Disable handling SIGPIPE
 
 We've disabled handling SIGPIPE again. Turns out, writing to broken TCP connections also
 raised SIGPIPE, so restic exits on the first write to a broken connection. Instead, restic
@@ -87,7 +87,7 @@ Summary
 * Fix #1256: Re-enable workaround for S3 backend
 * Fix #1291: Reuse backend TCP connections to BackBlaze B2
 * Fix #1317: Run prune when `forget --prune` is called with just snapshot IDs
-* Fix #1292: Remove implicit path `/restic` for the s3 backend
+* Fix #1437: Remove implicit path `/restic` for the s3 backend
 * Enh #1102: Add subdirectory `ids` to fuse mount
 * Enh #1114: Add `--cacert` to specify TLS certificates to check against
 * Enh #1216: Add upload/download limiting
@@ -96,11 +96,11 @@ Summary
 * Enh #1367: Allow comments in files read from via `--file-from`
 * Enh #448: Sftp backend prompts for password
 * Enh #510: Add `dump` command
-* Enh #29: Add local metadata cache
+* Enh #1040: Add local metadata cache
 * Enh #1249: Add `latest` symlink in fuse mount
 * Enh #1269: Add `--compact` to `forget` command
 * Enh #1281: Google Cloud Storage backend needs less permissions
-* Enh #1303: Make `check` print `no errors found` explicitly
+* Enh #1319: Make `check` print `no errors found` explicitly
 * Enh #1353: Retry failed backend requests
 
 Details
@@ -148,7 +148,7 @@ Details
 
 https://github.com/restic/restic/pull/1317
 
-* Bugfix #1292: Remove implicit path `/restic` for the s3 backend
+* Bugfix #1437: Remove implicit path `/restic` for the s3 backend
 
 The s3 backend used the subdir `restic` within a bucket if no explicit path after the bucket name
 was specified. Since this version, restic does not use this default path any more. If you
@@ -226,7 +226,7 @@ Details
 https://github.com/restic/restic/issues/510
 https://github.com/restic/restic/pull/1346
 
-* Enhancement #29: Add local metadata cache
+* Enhancement #1040: Add local metadata cache
 
 We've added a local cache for metadata so that restic doesn't need to load all metadata
 (snapshots, indexes, ...) from the repo each time it starts. By default the cache is active, but
@@ -270,7 +270,7 @@ Details
 
 https://github.com/restic/restic/pull/1281
 
-* Enhancement #1303: Make `check` print `no errors found` explicitly
+* Enhancement #1319: Make `check` print `no errors found` explicitly
 
 The `check` command now explicetly prints `No errors were found` when no errors could be found.
 
@@ -326,9 +326,9 @@ Summary
 * Enh #1044: Improve `restore`, do not traverse/load excluded directories
 * Enh #1061: Add Dockerfile and official Docker image
 * Enh #1126: Use the standard Go git repository layout, use `dep` for vendoring
-* Enh #211: Add support for storing backups on Google Cloud Storage
+* Enh #1134: Add support for storing backups on Google Cloud Storage
 * Enh #1144: Properly report errors when reading files with exclude patterns.
-* Enh #609: Add support for storing backups on Microsoft Azure Blob Storage
+* Enh #1149: Add support for storing backups on Microsoft Azure Blob Storage
 * Enh #1196: Add `--group-by` to `forget` command for flexible grouping
 * Enh #1203: Print stats on all BSD systems when SIGINFO (ctrl+t) is received
 * Enh #1205: Allow specifying time/date for a backup with `--time`
@@ -409,7 +409,7 @@ Details
 
 https://github.com/restic/restic/pull/1126
 
-* Enhancement #211: Add support for storing backups on Google Cloud Storage
+* Enhancement #1134: Add support for storing backups on Google Cloud Storage
 
 https://github.com/restic/restic/issues/211
 https://github.com/restic/restic/pull/1134
@@ -419,7 +419,7 @@ Details
 
 https://github.com/restic/restic/pull/1144
 
-* Enhancement #609: Add support for storing backups on Microsoft Azure Blob Storage
+* Enhancement #1149: Add support for storing backups on Microsoft Azure Blob Storage
 
 The library we're using to access the service requires Go 1.8, so restic now needs at least Go
 1.8.
@@ -655,14 +655,14 @@ restic users. The changes are ordered by importance.
 Summary
 -------
 
-* Enh #953: Make `forget` consistent
-* Enh #965: Unify repository layout for all backends
+* Enh #957: Make `forget` consistent
+* Enh #966: Unify repository layout for all backends
 * Enh #962: Improve memory and runtime for the s3 backend
 
 Details
 -------
 
-* Enhancement #953: Make `forget` consistent
+* Enhancement #957: Make `forget` consistent
 
 The `forget` command was corrected to be more consistent in which snapshots are to be
 forgotten. It is possible that the new code removes more snapshots than before, so please
@@ -671,7 +671,7 @@ Details
 https://github.com/restic/restic/issues/953
 https://github.com/restic/restic/pull/957
 
-* Enhancement #965: Unify repository layout for all backends
+* Enhancement #966: Unify repository layout for all backends
 
 Up to now the s3 backend used a special repository layout. We've decided to unify the repository
 layout and implemented the default layout also for the s3 backend. For creating a new
Gopkg.lock (generated, 4 changed lines)

@@ -178,7 +178,7 @@
 [[projects]]
 branch = "master"
 name = "golang.org/x/net"
-packages = ["context","context/ctxhttp"]
+packages = ["context","context/ctxhttp","webdav","webdav/internal/xml"]
 revision = "a8b9294777976932365dabb6640cf1468d95c70f"
 
 [[projects]]
@@ -214,6 +214,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d"
+inputs-digest = "b01eeeb2be041c7cd11f9ee50324ef456ac1e1cd0720408c6d72f88f92f09320"
 solver-name = "gps-cdcl"
 solver-version = 1
@@ -102,7 +102,7 @@ News
 ----
 
 You can follow the restic project on Twitter `@resticbackup <https://twitter.com/resticbackup>`__ or by subscribing to
-the `development blog <https://restic.github.io/blog/>`__.
+the `development blog <https://restic.net/blog/>`__.
 
 License
 -------
build.go (105 changed lines)

@@ -11,6 +11,7 @@ import (
 "path"
 "path/filepath"
 "runtime"
+"strconv"
 "strings"
 )
 
@@ -22,10 +23,11 @@ var (
 )
 
 var config = struct {
 Name string
 Namespace string
 Main string
 Tests []string
+MinVersion GoVersion
 }{
 Name: "restic", // name of the program executable and directory
 Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar"
@@ -33,6 +35,7 @@ var config = struct {
 Tests: []string{ // tests to run
 "github.com/restic/restic/internal/...",
 "github.com/restic/restic/cmd/..."},
+MinVersion: GoVersion{Major: 1, Minor: 8, Patch: 0}, // minimum Go version supported
 }
 
 // specialDir returns true if the file begins with a special character ('.' or '_').
@@ -137,7 +140,6 @@ func copyFile(dst, src string) error {
 if err != nil {
 return err
 }
-defer fsrc.Close()
 
 if err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
 fmt.Printf("MkdirAll(%v)\n", filepath.Dir(dst))
@@ -148,17 +150,28 @@ func copyFile(dst, src string) error {
 if err != nil {
 return err
 }
-defer fdst.Close()
 
-_, err = io.Copy(fdst, fsrc)
+if _, err = io.Copy(fdst, fsrc); err != nil {
+return err
+}
+
+if err == nil {
+err = fsrc.Close()
+}
+
+if err == nil {
+err = fdst.Close()
+}
+
 if err == nil {
 err = os.Chmod(dst, fi.Mode())
 }
 
 if err == nil {
 err = os.Chtimes(dst, fi.ModTime(), fi.ModTime())
 }
 
-return err
+return nil
 }
 
 // die prints the message with fmt.Fprintf() to stderr and exits with an error
@@ -300,10 +313,80 @@ func (cs Constants) LDFlags() string {
 return strings.Join(l, " ")
 }
 
+// GoVersion is the version of Go used to compile the project.
+type GoVersion struct {
+Major int
+Minor int
+Patch int
+}
+
+// ParseGoVersion parses the Go version s. If s cannot be parsed, the returned GoVersion is null.
+func ParseGoVersion(s string) (v GoVersion) {
+if !strings.HasPrefix(s, "go") {
+return
+}
+
+s = s[2:]
+data := strings.Split(s, ".")
+if len(data) != 3 {
+return
+}
+
+major, err := strconv.Atoi(data[0])
+if err != nil {
+return
+}
+
+minor, err := strconv.Atoi(data[1])
+if err != nil {
+return
+}
+
+patch, err := strconv.Atoi(data[2])
+if err != nil {
+return
+}
+
+v = GoVersion{
+Major: major,
+Minor: minor,
+Patch: patch,
+}
+return
+}
+
+// AtLeast returns true if v is at least as new as other. If v is empty, true is returned.
+func (v GoVersion) AtLeast(other GoVersion) bool {
+var empty GoVersion
+
+// the empty version satisfies all versions
+if v == empty {
+return true
+}
+
+if v.Major < other.Major {
+return false
+}
+
+if v.Minor < other.Minor {
+return false
+}
+
+if v.Patch < other.Patch {
+return false
+}
+
+return true
+}
+
+func (v GoVersion) String() string {
+return fmt.Sprintf("Go %d.%d.%d", v.Major, v.Minor, v.Patch)
+}
+
 func main() {
-ver := runtime.Version()
-if strings.HasPrefix(ver, "go1") && ver < "go1.8" {
-fmt.Fprintf(os.Stderr, "Go version %s detected, restic requires at least Go 1.8\n", ver)
+ver := ParseGoVersion(runtime.Version())
+if !ver.AtLeast(config.MinVersion) {
+fmt.Fprintf(os.Stderr, "%s detected, this program requires at least %s\n", ver, config.MinVersion)
 os.Exit(1)
 }
 
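The new version gate in build.go is small enough to try out on its own. Below is a hedged, standalone sketch of the same idea (parse `runtime.Version()`, compare it field by field against a minimum). The helper names and the comparison that short-circuits on the first differing component are illustrative choices, not restic's code; the `AtLeast` in the hunk above compares each field independently.

```go
// Sketch: a "minimum Go version" gate similar to the one build.go now implements,
// reduced to a standalone program with made-up helper names.
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"
)

type goVersion struct{ major, minor, patch int }

// parseGoVersion turns strings such as "go1.9.2" into a goVersion.
func parseGoVersion(s string) (goVersion, bool) {
	if !strings.HasPrefix(s, "go") {
		return goVersion{}, false
	}
	parts := strings.Split(s[2:], ".")
	if len(parts) != 3 {
		return goVersion{}, false
	}
	nums := make([]int, 3)
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return goVersion{}, false
		}
		nums[i] = n
	}
	return goVersion{nums[0], nums[1], nums[2]}, true
}

// atLeast reports whether v is at least min, comparing components in order.
func atLeast(v, min goVersion) bool {
	if v.major != min.major {
		return v.major > min.major
	}
	if v.minor != min.minor {
		return v.minor > min.minor
	}
	return v.patch >= min.patch
}

func main() {
	min := goVersion{1, 8, 0}
	if v, ok := parseGoVersion(runtime.Version()); ok && !atLeast(v, min) {
		fmt.Fprintf(os.Stderr, "Go %d.%d.%d detected, need at least %d.%d.%d\n",
			v.major, v.minor, v.patch, min.major, min.minor, min.patch)
		os.Exit(1)
	}
	fmt.Println("Go version is new enough:", runtime.Version())
}
```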
@@ -1,4 +1,4 @@
 Bugfix: Improve s3 backend with DigitalOcean Spaces
 
-https://github.com/restic/restic/pull/1459
 https://github.com/restic/restic/issues/1457
+https://github.com/restic/restic/pull/1459
changelog/0.8.2/issue-1506 (new file, 4 lines)

@@ -0,0 +1,4 @@
+Bugfix: Limit bandwith at the http.RoundTripper for HTTP based backends
+
+https://github.com/restic/restic/issues/1506
+https://github.com/restic/restic/pull/1511

changelog/0.8.2/issue-1512 (new file, 9 lines)

@@ -0,0 +1,9 @@
+Bugfix: Restore directory permissions as the last step
+
+This change allows restoring into directories that were not writable during
+backup. Before, restic created the directory, set the read-only mode and then
+failed to create files in the directory. This change now restores the directory
+(with its permissions) as the very last step.
+
+https://github.com/restic/restic/issues/1512
+https://github.com/restic/restic/pull/1536

changelog/0.8.2/issue-1528 (new file, 4 lines)

@@ -0,0 +1,4 @@
+Bugfix: Correctly create missing subdirs in data/
+
+https://github.com/restic/restic/issues/1528
+https://github.com/restic/restic/pull/1529

changelog/0.8.2/pull-1507 (new file, 3 lines)

@@ -0,0 +1,3 @@
+Enhancement: Only reload snapshots once per minute for fuse mount
+
+https://github.com/restic/restic/pull/1507

changelog/0.8.2/pull-1538 (new file, 7 lines)

@@ -0,0 +1,7 @@
+Enhancement: Reduce memory allocations for querying the index
+
+This change reduces the internal memory allocations when the index data
+structures in memory are queried if a blob (part of a file) already exists in
+the repo. It should speed up backup a bit, and maybe even reduce RAM usage.
+
+https://github.com/restic/restic/pull/1538

changelog/0.8.2/pull-1554 (new file, 7 lines)

@@ -0,0 +1,7 @@
+Enhancement: fuse/mount: Correctly handle EOF, add template option
+
+We've added the `--snapshot-template` string, which can be used to specify a
+template for a snapshot directory. In addition, accessing data after the end of
+a file via the fuse mount is now handled correctly.
+
+https://github.com/restic/restic/pull/1554
@@ -19,7 +19,7 @@ Details
 {{ $par }}
 {{ end }}
 {{ range $id := .Issues -}}
-[{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}})
+{{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}})
 {{- end -}}
 {{ range $id := .PRs -}}
 {{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/pull/{{ $id -}})
@@ -3,7 +3,8 @@
 #
 # The resulting changelog generated by `calens` will list all versions in
 # exactly this order.
-0.8.1
+0.8.2
+0.8.1 2017-12-27
 0.8.0 2017-11-26
 0.7.3 2017-09-20
 0.7.2 2017-09-13
@@ -162,9 +162,6 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
 snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
 }
 }
-if len(args) > 0 {
-return nil
-}
 
 policy := restic.ExpirePolicy{
 Last: opts.Last,
@@ -176,56 +173,57 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
 Tags: opts.KeepTags,
 }
 
-if policy.Empty() {
+if policy.Empty() && len(args) == 0 {
 Verbosef("no policy was specified, no snapshots will be removed\n")
-return nil
 }
 
-for k, snapshotGroup := range snapshotGroups {
-var key key
-if json.Unmarshal([]byte(k), &key) != nil {
-return err
-}
+if !policy.Empty() {
+for k, snapshotGroup := range snapshotGroups {
+var key key
+if json.Unmarshal([]byte(k), &key) != nil {
+return err
+}
 
 // Info
 Verbosef("snapshots")
 var infoStrings []string
 if GroupByTag {
 infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
 }
 if GroupByHost {
 infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
 }
 if GroupByPath {
 infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
 }
 if infoStrings != nil {
 Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
 }
 Verbosef(":\n\n")
 
 keep, remove := restic.ApplyPolicy(snapshotGroup, policy)
 
 if len(keep) != 0 && !gopts.Quiet {
 Printf("keep %d snapshots:\n", len(keep))
 PrintSnapshots(globalOptions.stdout, keep, opts.Compact)
 Printf("\n")
 }
 
 if len(remove) != 0 && !gopts.Quiet {
 Printf("remove %d snapshots:\n", len(remove))
 PrintSnapshots(globalOptions.stdout, remove, opts.Compact)
 Printf("\n")
 }
 
 removeSnapshots += len(remove)
 
 if !opts.DryRun {
 for _, sn := range remove {
 h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
 err = repo.Backend().Remove(gopts.ctx, h)
 if err != nil {
 return err
+}
 }
 }
 }
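For readers unfamiliar with the forget flow above: each snapshot group is run through `restic.ApplyPolicy(snapshotGroup, policy)`, which returns a keep/remove pair that the code then prints and acts on. The toy program below is a rough sketch of that split with made-up snapshot data and only a "keep the newest n" rule; the real `ExpirePolicy` also knows hourly, daily, weekly, monthly, yearly, and tag-based rules.

```go
// Sketch only: a toy "keep last n" expiry standing in for restic.ApplyPolicy.
package main

import (
	"fmt"
	"sort"
	"time"
)

type snapshot struct {
	ID   string
	Time time.Time
}

// applyKeepLast splits snapshots into the newest n (keep) and the rest (remove),
// mirroring the keep/remove pair returned by ApplyPolicy.
func applyKeepLast(snapshots []snapshot, n int) (keep, remove []snapshot) {
	sorted := append([]snapshot(nil), snapshots...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Time.After(sorted[j].Time) })
	for i, sn := range sorted {
		if i < n {
			keep = append(keep, sn)
		} else {
			remove = append(remove, sn)
		}
	}
	return keep, remove
}

func main() {
	now := time.Now()
	var sns []snapshot
	for i := 0; i < 5; i++ {
		sns = append(sns, snapshot{
			ID:   fmt.Sprintf("snap-%d", i),
			Time: now.Add(-time.Duration(i) * 24 * time.Hour),
		})
	}

	keep, remove := applyKeepLast(sns, 2)
	fmt.Println("keep:", len(keep), "remove:", len(remove))
}
```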
@@ -5,6 +5,8 @@ package main
 
 import (
 "os"
+"strings"
+"time"
 
 "github.com/spf13/cobra"
 
@@ -25,6 +27,21 @@ var cmdMount = &cobra.Command{
 Long: `
 The "mount" command mounts the repository via fuse to a directory. This is a
 read-only mount.
+
+Snapshot Directories
+====================
+
+If you need a different template for all directories that contain snapshots,
+you can pass a template via --snapshot-template. Example without colons:
+
+--snapshot-template "2006-01-02_15-04-05"
+
+You need to specify a sample format for exactly the following timestamp:
+
+Mon Jan 2 15:04:05 -0700 MST 2006
+
+For details please see the documentation for time.Format() at:
+https://godoc.org/time#Time.Format
 `,
 DisableAutoGenTag: true,
 RunE: func(cmd *cobra.Command, args []string) error {
@@ -34,12 +51,13 @@ read-only mount.
 
 // MountOptions collects all options for the mount command.
 type MountOptions struct {
 OwnerRoot bool
 AllowRoot bool
 AllowOther bool
 Host string
 Tags restic.TagLists
 Paths []string
+SnapshotTemplate string
 }
 
 var mountOptions MountOptions
@@ -55,6 +73,8 @@ func init() {
 mountFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`)
 mountFlags.Var(&mountOptions.Tags, "tag", "only consider snapshots which include this `taglist`")
 mountFlags.StringArrayVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`")
+
+mountFlags.StringVar(&mountOptions.SnapshotTemplate, "snapshot-template", time.RFC3339, "set `template` to use for snapshot dirs")
 }
 
 func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
@@ -108,10 +128,11 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
 }
 
 cfg := fuse.Config{
 OwnerIsRoot: opts.OwnerRoot,
 Host: opts.Host,
 Tags: opts.Tags,
 Paths: opts.Paths,
+SnapshotTemplate: opts.SnapshotTemplate,
 }
 root, err := fuse.NewRoot(gopts.ctx, repo, cfg)
 if err != nil {
@@ -136,6 +157,10 @@ func umount(mountpoint string) error {
 }
 
 func runMount(opts MountOptions, gopts GlobalOptions, args []string) error {
+if strings.ContainsAny(opts.SnapshotTemplate, `\/`) {
+return errors.Fatal("snapshot template string contains a slash (/) or backslash (\\) character")
+}
+
 if len(args) == 0 {
 return errors.Fatal("wrong number of parameters")
 }
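The `--snapshot-template` flag added above is a plain Go `time.Format` layout (default `time.RFC3339`), and the fuse code applies it via `sn.Time.Format(template)`. A small illustration with a made-up snapshot time and the colon-free layout from the help text:

```go
// Sketch: how a snapshot template turns a snapshot time into a fuse directory
// name, the same way updateSnapshotNames calls sn.Time.Format(template).
package main

import (
	"fmt"
	"time"
)

func main() {
	// Example layout from the help text; the reference timestamp it describes
	// is Mon Jan 2 15:04:05 -0700 MST 2006.
	const template = "2006-01-02_15-04-05"
	snapshotTime := time.Date(2017, 12, 27, 9, 30, 0, 0, time.UTC) // made-up snapshot time

	fmt.Println("default (time.RFC3339):", snapshotTime.Format(time.RFC3339))
	fmt.Println("custom template:       ", snapshotTime.Format(template))
	// prints:
	// default (time.RFC3339): 2017-12-27T09:30:00Z
	// custom template:        2017-12-27_09-30-00
}
```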
@@ -53,8 +53,11 @@ func init() {
 func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
 ctx := gopts.ctx
 
-if len(args) != 1 {
+switch {
+case len(args) == 0:
 return errors.Fatal("no snapshot ID specified")
+case len(args) > 1:
+return errors.Fatalf("more than one snapshot ID specified: %v", args)
 }
 
 if opts.Target == "" {
cmd/restic/cmd_webdav.go (new file, 96 lines)

@@ -0,0 +1,96 @@
+// +build !openbsd
+// +build !windows
+
+package main
+
+import (
+"log"
+"net/http"
+"os"
+"time"
+
+"github.com/spf13/cobra"
+
+"github.com/restic/restic/internal/errors"
+"github.com/restic/restic/internal/restic"
+"github.com/restic/restic/internal/serve"
+)
+
+var cmdWebDAV = &cobra.Command{
+Use: "webdav [flags]",
+Short: "runs a WebDAV server for the repository",
+Long: `
+The webdav command runs a WebDAV server for the reposiotry that you can then access via a WebDAV client.
+`,
+DisableAutoGenTag: true,
+RunE: func(cmd *cobra.Command, args []string) error {
+return runWebDAV(webdavOptions, globalOptions, args)
+},
+}
+
+// WebDAVOptions collects all options for the webdav command.
+type WebDAVOptions struct {
+Listen string
+
+Host string
+Tags restic.TagLists
+Paths []string
+}
+
+var webdavOptions WebDAVOptions
+
+func init() {
+cmdRoot.AddCommand(cmdWebDAV)
+
+webdavFlags := cmdWebDAV.Flags()
+webdavFlags.StringVarP(&webdavOptions.Listen, "listen", "l", "localhost:3080", "set the listen host name and `address`")
+
+webdavFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`)
+webdavFlags.Var(&mountOptions.Tags, "tag", "only consider snapshots which include this `taglist`")
+webdavFlags.StringArrayVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`")
+}
+
+func runWebDAV(opts WebDAVOptions, gopts GlobalOptions, args []string) error {
+if len(args) > 0 {
+return errors.Fatal("this command does not accept additional arguments")
+}
+
+repo, err := OpenRepository(gopts)
+if err != nil {
+return err
+}
+
+lock, err := lockRepo(repo)
+defer unlockRepo(lock)
+if err != nil {
+return err
+}
+
+err = repo.LoadIndex(gopts.ctx)
+if err != nil {
+return err
+}
+
+errorLogger := log.New(os.Stderr, "error log: ", log.Flags())
+
+cfg := serve.Config{
+Host: opts.Host,
+Tags: opts.Tags,
+Paths: opts.Paths,
+}
+
+h, err := serve.NewWebDAV(gopts.ctx, repo, cfg)
+if err != nil {
+return err
+}
+
+srv := &http.Server{
+ReadTimeout: 60 * time.Second,
+WriteTimeout: 60 * time.Second,
+Addr: opts.Listen,
+Handler: h,
+ErrorLog: errorLogger,
+}
+
+return srv.ListenAndServe()
+}
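The `internal/serve` package behind `serve.NewWebDAV` is not part of this compare, so only its call site is visible. As a rough, hedged sketch of the kind of server the command wires up, the standalone program below serves a local directory with the `golang.org/x/net/webdav` package that Gopkg.lock now vendors; the directory, listen address, and logger are illustrative, and restic's real handler exposes snapshot contents read-only rather than an OS directory.

```go
// Sketch only: a minimal WebDAV server built on golang.org/x/net/webdav,
// mirroring the http.Server wiring in runWebDAV above.
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"golang.org/x/net/webdav"
)

func main() {
	handler := &webdav.Handler{
		FileSystem: webdav.Dir("./public"), // any webdav.FileSystem works here
		LockSystem: webdav.NewMemLS(),      // in-memory WebDAV locks
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("%s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}

	srv := &http.Server{
		Addr:         "localhost:3080", // same default as the --listen flag above
		Handler:      handler,
		ReadTimeout:  60 * time.Second,
		WriteTimeout: 60 * time.Second,
		ErrorLog:     log.New(os.Stderr, "error log: ", log.Flags()),
	}

	log.Println("serving WebDAV on http://" + srv.Addr)
	log.Fatal(srv.ListenAndServe())
}
```

Any WebDAV client pointed at that address can then browse the tree; with the restic command above it would be the snapshot hierarchy instead of a local directory.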
@@ -323,16 +323,11 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
 return nil, errors.Fatal("Please specify repository location (-r)")
 }
 
-be, err := open(opts.Repo, opts.extended)
+be, err := open(opts.Repo, opts, opts.extended)
 if err != nil {
 return nil, err
 }
 
-if opts.LimitUploadKb > 0 || opts.LimitDownloadKb > 0 {
-debug.Log("rate limiting backend to %d KiB/s upload and %d KiB/s download", opts.LimitUploadKb, opts.LimitDownloadKb)
-be = limiter.LimitBackend(be, limiter.NewStaticLimiter(opts.LimitUploadKb, opts.LimitDownloadKb))
-}
-
 be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {
 Warnf("%v returned error, retrying after %v: %v\n", msg, d, err)
 })
@@ -532,7 +527,7 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
 }
 
 // Open the backend specified by a location config.
-func open(s string, opts options.Options) (restic.Backend, error) {
+func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
 debug.Log("parsing location %v", s)
 loc, err := location.Parse(s)
 if err != nil {
@@ -551,11 +546,18 @@ func open(s string, opts options.Options) (restic.Backend, error) {
 return nil, err
 }
 
+// wrap the transport so that the throughput via HTTP is limited
+rt = limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb).Transport(rt)
+
 switch loc.Scheme {
 case "local":
 be, err = local.Open(cfg.(local.Config))
+// wrap the backend in a LimitBackend so that the throughput is limited
+be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb))
 case "sftp":
 be, err = sftp.Open(cfg.(sftp.Config), SuspendSignalHandler, InstallSignalHandler)
+// wrap the backend in a LimitBackend so that the throughput is limited
+be = limiter.LimitBackend(be, limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb))
 case "s3":
 be, err = s3.Open(cfg.(s3.Config), rt)
 case "gs":
@@ -114,4 +114,5 @@ via other protocols.
 The following may work:
 
 ::
+
 $ restic init -r sftp:user@nas:/restic-repo init
@@ -117,3 +117,14 @@ func (be *RetryBackend) Remove(ctx context.Context, h restic.Handle) (err error)
 return be.Backend.Remove(ctx, h)
 })
 }
+
+// Test a boolean value whether a File with the name and type exists.
+func (be *RetryBackend) Test(ctx context.Context, h restic.Handle) (exists bool, err error) {
+err = be.retry(ctx, fmt.Sprintf("Test(%v)", h), func() error {
+var innerError error
+exists, innerError = be.Backend.Test(ctx, h)
+
+return innerError
+})
+return exists, err
+}
@@ -52,27 +52,7 @@ func Open(cfg Config) (*Local, error) {
 return nil, err
 }
 
-be := &Local{Config: cfg, Layout: l}
-
-// if data dir exists, make sure that all subdirs also exist
-datadir := be.Dirname(restic.Handle{Type: restic.DataFile})
-if dirExists(datadir) {
-debug.Log("datadir %v exists", datadir)
-for _, d := range be.Paths() {
-if !fs.HasPathPrefix(datadir, d) {
-debug.Log("%v is not subdir of datadir %v", d, datadir)
-continue
-}
-
-debug.Log("MkdirAll %v", d)
-err := fs.MkdirAll(d, backend.Modes.Dir)
-if err != nil {
-return nil, errors.Wrap(err, "MkdirAll")
-}
-}
-}
-
-return be, nil
+return &Local{Config: cfg, Layout: l}, nil
 }
 
 // Create creates all the necessary files and directories for a new local
@@ -124,20 +104,24 @@ func (b *Local) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
 return err
 }
 
-if h.Type == restic.LockFile {
-lockDir := b.Dirname(h)
-if !dirExists(lockDir) {
-debug.Log("locks/ does not exist yet, creating now.")
-if err := fs.MkdirAll(lockDir, backend.Modes.Dir); err != nil {
-return errors.Wrap(err, "MkdirAll")
-}
-}
-}
-
 filename := b.Filename(h)
 
 // create new file
 f, err := fs.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, backend.Modes.File)
+
+if b.IsNotExist(err) {
+debug.Log("error %v: creating dir", err)
+
+// error is caused by a missing directory, try to create it
+mkdirErr := os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
+if mkdirErr != nil {
+debug.Log("error creating dir %v: %v", filepath.Dir(filename), mkdirErr)
+} else {
+// try again
+f, err = fs.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, backend.Modes.File)
+}
+}
+
 if err != nil {
 return errors.Wrap(err, "OpenFile")
 }
@@ -254,7 +238,7 @@ func (b *Local) List(ctx context.Context, t restic.FileType) <-chan string {
 
 basedir, subdirs := b.Basedir(t)
 err := fs.Walk(basedir, func(path string, fi os.FileInfo, err error) error {
-debug.Log("walk on %v, %v\n", path, fi.IsDir())
+debug.Log("walk on %v\n", path)
 if err != nil {
 return err
 }
@@ -132,48 +132,11 @@ func Open(cfg Config, preExec, postExec func()) (*SFTP, error) {
 
 debug.Log("layout: %v\n", sftp.Layout)
 
-if err := sftp.checkDataSubdirs(); err != nil {
-debug.Log("checkDataSubdirs returned %v", err)
-return nil, err
-}
-
 sftp.Config = cfg
 sftp.p = cfg.Path
 return sftp, nil
 }
 
-func (r *SFTP) checkDataSubdirs() error {
-datadir := r.Dirname(restic.Handle{Type: restic.DataFile})
-
-// check if all paths for data/ exist
-entries, err := r.ReadDir(datadir)
-if r.IsNotExist(err) {
-return nil
-}
-
-if err != nil {
-return err
-}
-
-subdirs := make(map[string]struct{}, len(entries))
-for _, entry := range entries {
-subdirs[entry.Name()] = struct{}{}
-}
-
-for i := 0; i < 256; i++ {
-subdir := fmt.Sprintf("%02x", i)
-if _, ok := subdirs[subdir]; !ok {
-debug.Log("subdir %v is missing, creating", subdir)
-err := r.mkdirAll(path.Join(datadir, subdir), backend.Modes.Dir)
-if err != nil {
-return err
-}
-}
-}
-
-return nil
-}
-
 func (r *SFTP) mkdirAllDataSubdirs() error {
 for _, d := range r.Paths() {
 err := r.mkdirAll(d, backend.Modes.Dir)
@@ -203,6 +166,8 @@ func (r *SFTP) ReadDir(dir string) ([]os.FileInfo, error) {
 
 // IsNotExist returns true if the error is caused by a not existing file.
 func (r *SFTP) IsNotExist(err error) bool {
+err = errors.Cause(err)
+
 if os.IsNotExist(err) {
 return true
 }
@@ -334,14 +299,16 @@ func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err err
 
 // create new file
 f, err := r.c.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
-if r.IsNotExist(errors.Cause(err)) {
-// create the locks dir, then try again
-err = r.mkdirAll(r.Dirname(h), backend.Modes.Dir)
-if err != nil {
-return errors.Wrap(err, "MkdirAll")
-}
 
-return r.Save(ctx, h, rd)
+if r.IsNotExist(err) {
+// error is caused by a missing directory, try to create it
+mkdirErr := r.mkdirAll(r.Dirname(h), backend.Modes.Dir)
+if mkdirErr != nil {
+debug.Log("error creating dir %v: %v", r.Dirname(h), mkdirErr)
+} else {
+// try again
+f, err = r.c.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
+}
 }
 
 if err != nil {
@@ -34,5 +34,5 @@ func Fatal(s string) error {
 
 // Fatalf returns an error which implements the Fataler interface.
 func Fatalf(s string, data ...interface{}) error {
-return fatalError(fmt.Sprintf(s, data...))
+return Wrap(fatalError(fmt.Sprintf(s, data...)), "Fatal")
 }
@@ -5,14 +5,15 @@ import (
 )
 
 // HasPathPrefix returns true if p is a subdir of (or a file within) base. It
-// assumes a file system which is case sensitive. For relative paths, false is
-// returned.
+// assumes a file system which is case sensitive. If the paths are not of the
+// same type (one is relative, the other is absolute), false is returned.
 func HasPathPrefix(base, p string) bool {
 if filepath.VolumeName(base) != filepath.VolumeName(p) {
 return false
 }
 
-if !filepath.IsAbs(base) || !filepath.IsAbs(p) {
+// handle case when base and p are not of the same type
+if filepath.IsAbs(base) != filepath.IsAbs(p) {
 return false
 }
 
@@ -21,7 +21,10 @@ func TestHasPathPrefix(t *testing.T) {
 base, p string
 result bool
 }{
-{"", "", false},
+{"", "", true},
+{".", ".", true},
+{".", "foo", true},
+{"foo", ".", false},
 {"/", "", false},
 {"/", "x", false},
 {"x", "/", false},
@@ -36,6 +39,10 @@ func TestHasPathPrefix(t *testing.T) {
 {"/home/user/foo", "/home/user/foobar", false},
 {"/home/user/Foo", "/home/user/foo/bar/baz", false},
 {"/home/user/foo", "/home/user/Foo/bar/baz", false},
+{"user/foo", "user/foo/bar/baz", true},
+{"user/foo", "./user/foo", true},
+{"user/foo", "./user/foo/", true},
+{"/home/user/foo", "./user/foo/", false},
 }
 
 for _, test := range tests {
@@ -182,7 +182,6 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
 node, ok := d.items[name]
 if !ok {
 debug.Log(" Lookup(%v) -> not found", name)
-debug.Log(" items: %v\n", d.items)
 return nil, fuse.ENOENT
 }
 switch node.Type {
@@ -4,7 +4,6 @@
 package fuse
 
 import (
-"github.com/restic/restic/internal/errors"
 "github.com/restic/restic/internal/restic"
 
 "github.com/restic/restic/internal/debug"
@@ -111,7 +110,10 @@ func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
 if uint64(offset) > f.node.Size {
 debug.Log("Read(%v): offset is greater than file size: %v > %v",
 f.node.Name, req.Offset, f.node.Size)
-return errors.New("offset greater than files size")
+
+// return no data
+resp.Data = resp.Data[:0]
+return nil
 }
 
 // handle special case: file is empty
@@ -4,6 +4,8 @@
 package fuse
 
 import (
+"time"
+
 "github.com/restic/restic/internal/debug"
 "github.com/restic/restic/internal/restic"
 
@@ -14,10 +16,11 @@ import (
 
 // Config holds settings for the fuse mount.
 type Config struct {
 OwnerIsRoot bool
 Host string
 Tags []restic.TagList
 Paths []string
+SnapshotTemplate string
 }
 
 // Root is the root node of the fuse mount of a repository.
@@ -27,7 +30,9 @@ type Root struct {
 inode uint64
 snapshots restic.Snapshots
 blobSizeCache *BlobSizeCache
-snCount int
+snCount int
+lastCheck time.Time
 
 *MetaDir
 }
@@ -26,6 +26,8 @@ type SnapshotsDir struct {
 tag string
 host string
 snCount int
+
+template string
 }
 
 // SnapshotsIDSDir is a fuse directory which contains snapshots named by ids.
@@ -112,12 +114,13 @@ func updateSnapshotIDSNames(d *SnapshotsIDSDir) {
 func NewSnapshotsDir(root *Root, inode uint64, tag string, host string) *SnapshotsDir {
 debug.Log("create snapshots dir, inode %d", inode)
 d := &SnapshotsDir{
 root: root,
 inode: inode,
 names: make(map[string]*restic.Snapshot),
 latest: "",
 tag: tag,
 host: host,
+template: root.cfg.SnapshotTemplate,
 }
 
 return d
@@ -221,18 +224,25 @@ func isElem(e string, list []string) bool {
 return false
 }
 
+const minSnapshotsReloadTime = 60 * time.Second
+
 // update snapshots if repository has changed
 func updateSnapshots(ctx context.Context, root *Root) {
+if time.Since(root.lastCheck) < minSnapshotsReloadTime {
+return
+}
+
 snapshots := restic.FindFilteredSnapshots(ctx, root.repo, root.cfg.Host, root.cfg.Tags, root.cfg.Paths)
 if root.snCount != len(snapshots) {
 root.snCount = len(snapshots)
 root.repo.LoadIndex(ctx)
 root.snapshots = snapshots
 }
+root.lastCheck = time.Now()
 }
 
 // read snapshot timestamps from the current repository-state.
-func updateSnapshotNames(d *SnapshotsDir) {
+func updateSnapshotNames(d *SnapshotsDir, template string) {
 if d.snCount != d.root.snCount {
 d.snCount = d.root.snCount
 var latestTime time.Time
@@ -241,7 +251,7 @@ func updateSnapshotNames(d *SnapshotsDir) {
 for _, sn := range d.root.snapshots {
 if d.tag == "" || isElem(d.tag, sn.Tags) {
 if d.host == "" || d.host == sn.Hostname {
-name := sn.Time.Format(time.RFC3339)
+name := sn.Time.Format(template)
 if d.latest == "" || !sn.Time.Before(latestTime) {
 latestTime = sn.Time
 d.latest = name
@@ -251,7 +261,7 @@ func updateSnapshotNames(d *SnapshotsDir) {
 break
 }
 
-name = fmt.Sprintf("%s-%d", sn.Time.Format(time.RFC3339), i)
+name = fmt.Sprintf("%s-%d", sn.Time.Format(template), i)
 }
 
 d.names[name] = sn
@@ -269,7 +279,7 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
 updateSnapshots(ctx, d.root)
 
 // update snapshot names
-updateSnapshotNames(d)
+updateSnapshotNames(d, d.root.cfg.SnapshotTemplate)
 
 items := []fuse.Dirent{
 {
@@ -443,7 +453,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
 updateSnapshots(ctx, d.root)
 
 // update snapshot names
-updateSnapshotNames(d)
+updateSnapshotNames(d, d.root.cfg.SnapshotTemplate)
 
 sn, ok := d.names[name]
 if ok {
@@ -2,6 +2,7 @@ package limiter
 
 import (
 "io"
+"net/http"
 )
 
 // Limiter defines an interface that implementors can use to rate limit I/O
@@ -14,4 +15,7 @@ type Limiter interface {
 // Downstream returns a rate limited reader that is intended to be used
 // for downloads.
 Downstream(r io.Reader) io.Reader
+
+// Transport returns an http.RoundTripper limited with the limiter.
+Transport(http.RoundTripper) http.RoundTripper
 }
@@ -2,6 +2,7 @@ package limiter

import (
    "io"
+   "net/http"

    "github.com/juju/ratelimit"
)
@@ -41,6 +42,39 @@ func (l staticLimiter) Downstream(r io.Reader) io.Reader {
    return l.limit(r, l.downstream)
}

+type roundTripper func(*http.Request) (*http.Response, error)
+
+func (rt roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+   return rt(req)
+}
+
+func (l staticLimiter) roundTripper(rt http.RoundTripper, req *http.Request) (*http.Response, error) {
+   if req.Body != nil {
+       req.Body = limitedReadCloser{
+           limited:  l.Upstream(req.Body),
+           original: req.Body,
+       }
+   }
+
+   res, err := rt.RoundTrip(req)
+
+   if res != nil && res.Body != nil {
+       res.Body = limitedReadCloser{
+           limited:  l.Downstream(res.Body),
+           original: res.Body,
+       }
+   }
+
+   return res, err
+}
+
+// Transport returns an HTTP transport limited with the limiter l.
+func (l staticLimiter) Transport(rt http.RoundTripper) http.RoundTripper {
+   return roundTripper(func(req *http.Request) (*http.Response, error) {
+       return l.roundTripper(rt, req)
+   })
+}
+
func (l staticLimiter) limit(r io.Reader, b *ratelimit.Bucket) io.Reader {
    if b == nil {
        return r
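
A minimal sketch of how a backend might use the new Transport method to throttle an entire HTTP client. Only Limiter.Transport and its semantics come from the diff above; the helper name and the import path for the internal limiter package are assumptions made for the example:

package example

import (
    "net/http"

    "github.com/restic/restic/internal/limiter" // assumed import path
)

// newLimitedClient wraps the default transport so that request bodies are
// throttled via Upstream and response bodies via Downstream.
func newLimitedClient(lim limiter.Limiter) *http.Client {
    return &http.Client{Transport: lim.Transport(http.DefaultTransport)}
}
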
@@ -65,11 +65,11 @@ var invalidOptsTests = []struct {
}{
    {
        []string{"=bar", "bar=baz", "k="},
-       "empty key is not a valid option",
+       "Fatal: empty key is not a valid option",
    },
    {
        []string{"x=1", "foo=bar", "y=2", "foo=baz"},
-       `key "foo" present more than once`,
+       `Fatal: key "foo" present more than once`,
    },
}

@@ -185,7 +185,7 @@ var invalidSetTests = []struct {
        "first_name": "foobar",
    },
    "ns",
-   "option ns.first_name is not known",
+   "Fatal: option ns.first_name is not known",
},
{
    Options{
@@ -169,12 +169,13 @@ func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) {

// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool {
-   _, err := idx.Lookup(id, tpe)
-   if err == nil {
-       return true
-   }
+   idx.m.Lock()
+   defer idx.m.Unlock()

-   return false
+   h := restic.BlobHandle{ID: id, Type: tpe}
+
+   _, ok := idx.pack[h]
+   return ok
}

// LookupSize returns the length of the plaintext content of the blob with the
@@ -2,6 +2,7 @@ package repository_test

import (
    "bytes"
+   "math/rand"
    "testing"

    "github.com/restic/restic/internal/repository"
@@ -379,3 +380,106 @@ func TestIndexPacks(t *testing.T) {
    idxPacks := idx.Packs()
    rtest.Assert(t, packs.Equals(idxPacks), "packs in index do not match packs added to index")
}
+
+const maxPackSize = 16 * 1024 * 1024
+
+func createRandomIndex() (idx *repository.Index, lookupID restic.ID) {
+   idx = repository.NewIndex()
+
+   // create index with 200k pack files
+   for i := 0; i < 200000; i++ {
+       packID := restic.NewRandomID()
+       offset := 0
+       for offset < maxPackSize {
+           size := 2000 + rand.Intn(4*1024*1024)
+           id := restic.NewRandomID()
+           idx.Store(restic.PackedBlob{
+               PackID: packID,
+               Blob: restic.Blob{
+                   Type:   restic.DataBlob,
+                   ID:     id,
+                   Length: uint(size),
+                   Offset: uint(offset),
+               },
+           })
+
+           offset += size
+
+           if rand.Float32() < 0.001 && lookupID.IsNull() {
+               lookupID = id
+           }
+       }
+   }
+
+   return idx, lookupID
+}
+
+func BenchmarkIndexHasUnknown(b *testing.B) {
+   idx, _ := createRandomIndex()
+   lookupID := restic.NewRandomID()
+
+   b.ResetTimer()
+
+   for i := 0; i < b.N; i++ {
+       idx.Has(lookupID, restic.DataBlob)
+   }
+}
+
+func BenchmarkIndexHasKnown(b *testing.B) {
+   idx, lookupID := createRandomIndex()
+
+   b.ResetTimer()
+
+   for i := 0; i < b.N; i++ {
+       idx.Has(lookupID, restic.DataBlob)
+   }
+}
+
+func TestIndexHas(t *testing.T) {
+   type testEntry struct {
+       id             restic.ID
+       pack           restic.ID
+       tpe            restic.BlobType
+       offset, length uint
+   }
+   tests := []testEntry{}
+
+   idx := repository.NewIndex()
+
+   // create 50 packs with 20 blobs each
+   for i := 0; i < 50; i++ {
+       packID := restic.NewRandomID()
+
+       pos := uint(0)
+       for j := 0; j < 20; j++ {
+           id := restic.NewRandomID()
+           length := uint(i*100 + j)
+           idx.Store(restic.PackedBlob{
+               Blob: restic.Blob{
+                   Type:   restic.DataBlob,
+                   ID:     id,
+                   Offset: pos,
+                   Length: length,
+               },
+               PackID: packID,
+           })
+
+           tests = append(tests, testEntry{
+               id:     id,
+               pack:   packID,
+               tpe:    restic.DataBlob,
+               offset: pos,
+               length: length,
+           })
+
+           pos += length
+       }
+   }
+
+   for _, testBlob := range tests {
+       rtest.Assert(t, idx.Has(testBlob.id, testBlob.tpe), "Index reports not having data blob added to it")
+   }
+
+   rtest.Assert(t, !idx.Has(restic.NewRandomID(), restic.DataBlob), "Index reports having a data blob not added to it")
+   rtest.Assert(t, !idx.Has(tests[0].id, restic.TreeBlob), "Index reports having a tree blob added to it with the same id as a data blob")
+}
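
The additions above are ordinary Go tests and benchmarks, so they can be exercised with the standard tooling; an example invocation (shown for illustration, not part of the change):

go test -run=TestIndexHas -bench=BenchmarkIndexHas ./internal/repository
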
@@ -79,13 +79,6 @@ func (res *Restorer) restoreTo(ctx context.Context, target, location string, tre
        selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node)
        debug.Log("SelectFilter returned %v %v", selectedForRestore, childMayBeSelected)

-       if selectedForRestore {
-           err = res.restoreNodeTo(ctx, node, nodeTarget, nodeLocation, idx)
-           if err != nil {
-               return err
-           }
-       }
-
        if node.Type == "dir" && childMayBeSelected {
            if node.Subtree == nil {
                return errors.Errorf("Dir without subtree in tree %v", treeID.Str())
@@ -98,14 +91,19 @@ func (res *Restorer) restoreTo(ctx context.Context, target, location string, tre
                return err
            }
        }
+   }

    if selectedForRestore {
-       // Restore directory timestamp at the end. If we would do it earlier, restoring files within
-       // the directory would overwrite the timestamp of the directory they are in.
-       err = node.RestoreTimestamps(nodeTarget)
-       if err != nil {
-           return err
-       }
+       err = res.restoreNodeTo(ctx, node, nodeTarget, nodeLocation, idx)
+       if err != nil {
+           return err
+       }
+
+       // Restore directory timestamp at the end. If we would do it earlier, restoring files within
+       // the directory would overwrite the timestamp of the directory they are in.
+       err = node.RestoreTimestamps(nodeTarget)
+       if err != nil {
+           return err
+       }
        }
    }
}
@@ -29,6 +29,7 @@ type File struct {

type Dir struct {
    Nodes map[string]Node
+   Mode  os.FileMode
}

func saveFile(t testing.TB, repo restic.Repository, node File) restic.ID {
@@ -63,9 +64,15 @@ func saveDir(t testing.TB, repo restic.Repository, nodes map[string]Node) restic
        })
    case Dir:
        id = saveDir(t, repo, node.Nodes)
+
+       mode := node.Mode
+       if mode == 0 {
+           mode = 0755
+       }
+
        tree.Insert(&restic.Node{
            Type: "dir",
-           Mode: 0755,
+           Mode: mode,
            Name: name,
            UID:  uint32(os.Getuid()),
            GID:  uint32(os.Getgid()),
@@ -166,6 +173,34 @@ func TestRestorer(t *testing.T) {
                "dir/subdir/file": "file in subdir",
            },
        },
+       {
+           Snapshot: Snapshot{
+               Nodes: map[string]Node{
+                   "dir": Dir{
+                       Mode: 0444,
+                   },
+                   "file": File{"top-level file"},
+               },
+           },
+           Files: map[string]string{
+               "file": "top-level file",
+           },
+       },
+       {
+           Snapshot: Snapshot{
+               Nodes: map[string]Node{
+                   "dir": Dir{
+                       Mode: 0555,
+                       Nodes: map[string]Node{
+                           "file": File{"file in dir"},
+                       },
+                   },
+               },
+           },
+           Files: map[string]string{
+               "dir/file": "file in dir",
+           },
+       },

        // test cases with invalid/constructed names
        {
internal/serve/dir.go (new file)
@@ -0,0 +1,74 @@
package serve

import (
    "io"
    "os"

    "github.com/restic/restic/internal/debug"
    "github.com/restic/restic/internal/restic"
    "golang.org/x/net/webdav"
)

// RepoDir implements a read-only directory from a repository.
type RepoDir struct {
    fi    os.FileInfo
    nodes []*restic.Node
}

// statically ensure that RepoDir implements webdav.File
var _ webdav.File = &RepoDir{}

func (f *RepoDir) Write(p []byte) (int, error) {
    return 0, webdav.ErrForbidden
}

// Close closes the repo file.
func (f *RepoDir) Close() error {
    debug.Log("")
    return nil
}

// Read reads up to len(p) byte from the file.
func (f *RepoDir) Read(p []byte) (int, error) {
    debug.Log("")
    return 0, io.EOF
}

// Seek sets the offset for the next Read or Write to offset, interpreted
// according to whence: SeekStart means relative to the start of the file,
// SeekCurrent means relative to the current offset, and SeekEnd means relative
// to the end. Seek returns the new offset relative to the start of the file
// and an error, if any.
func (f *RepoDir) Seek(offset int64, whence int) (int64, error) {
    debug.Log("")
    return 0, webdav.ErrForbidden
}

// Readdir reads the contents of the directory associated with file and returns
// a slice of up to n FileInfo values, as would be returned by Lstat, in
// directory order. Subsequent calls on the same file will yield further
// FileInfos.
//
// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
// Readdir returns an empty slice, it will return a non-nil error explaining
// why. At the end of a directory, the error is io.EOF.
//
// If n <= 0, Readdir returns all the FileInfo from the directory in a single
// slice. In this case, if Readdir succeeds (reads all the way to the end of
// the directory), it returns the slice and a nil error. If it encounters an
// error before the end of the directory, Readdir returns the FileInfo read
// until that point and a non-nil error.
func (f *RepoDir) Readdir(count int) (entries []os.FileInfo, err error) {
    debug.Log("count %d, %d nodes", count, len(f.nodes))

    entries = make([]os.FileInfo, 0, len(f.nodes))
    for _, node := range f.nodes {
        entries = append(entries, fileInfoFromNode(node))
    }
    return entries, nil
}

// Stat returns a FileInfo describing the named file.
func (f *RepoDir) Stat() (os.FileInfo, error) {
    return f.fi, nil
}
internal/serve/file.go (new file)
@@ -0,0 +1,67 @@
package serve

import (
    "io"
    "os"

    "github.com/restic/restic/internal/restic"
    "golang.org/x/net/webdav"
)

// RepoFile implements a read-only file from a repository.
type RepoFile struct {
    fi   os.FileInfo
    node *restic.Node
}

// statically ensure that RepoFile implements webdav.File
var _ webdav.File = &RepoFile{}

func (f *RepoFile) Write(p []byte) (int, error) {
    return 0, webdav.ErrForbidden
}

// Close closes the repo file.
func (f *RepoFile) Close() error {
    return nil
}

// Read reads up to len(p) byte from the file.
func (f *RepoFile) Read(p []byte) (int, error) {
    // TODO
    return 0, io.EOF
}

// Seek sets the offset for the next Read or Write to offset, interpreted
// according to whence: SeekStart means relative to the start of the file,
// SeekCurrent means relative to the current offset, and SeekEnd means relative
// to the end. Seek returns the new offset relative to the start of the file
// and an error, if any.
func (f *RepoFile) Seek(offset int64, whence int) (int64, error) {
    // TODO
    return 0, io.EOF
}

// Readdir reads the contents of the directory associated with file and returns
// a slice of up to n FileInfo values, as would be returned by Lstat, in
// directory order. Subsequent calls on the same file will yield further
// FileInfos.
//
// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
// Readdir returns an empty slice, it will return a non-nil error explaining
// why. At the end of a directory, the error is io.EOF.
//
// If n <= 0, Readdir returns all the FileInfo from the directory in a single
// slice. In this case, if Readdir succeeds (reads all the way to the end of
// the directory), it returns the slice and a nil error. If it encounters an
// error before the end of the directory, Readdir returns the FileInfo read
// until that point and a non-nil error.
func (f *RepoFile) Readdir(count int) ([]os.FileInfo, error) {
    // TODO
    return nil, io.EOF
}

// Stat returns a FileInfo describing the named file.
func (f *RepoFile) Stat() (os.FileInfo, error) {
    return f.fi, nil
}
internal/serve/fileinfo.go (new file)
@@ -0,0 +1,43 @@
package serve

import (
    "os"
    "time"

    "github.com/restic/restic/internal/restic"
)

// virtFileInfo is used to construct an os.FileInfo for a server.
type virtFileInfo struct {
    name    string
    size    int64
    mode    os.FileMode
    modtime time.Time
    isdir   bool
}

// statically ensure that virtFileInfo implements os.FileInfo.
var _ os.FileInfo = virtFileInfo{}

func (fi virtFileInfo) Name() string       { return fi.name }
func (fi virtFileInfo) Size() int64        { return fi.size }
func (fi virtFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi virtFileInfo) ModTime() time.Time { return fi.modtime }
func (fi virtFileInfo) IsDir() bool        { return fi.isdir }
func (fi virtFileInfo) Sys() interface{}   { return nil }

func fileInfoFromNode(node *restic.Node) os.FileInfo {
    fi := virtFileInfo{
        name:    node.Name,
        size:    int64(node.Size),
        mode:    node.Mode,
        modtime: node.ModTime,
    }

    if node.Type == "dir" {
        fi.isdir = true
        fi.mode |= os.ModeDir
    }

    return fi
}
internal/serve/fs.go (new file)
@@ -0,0 +1,231 @@
package serve

import (
    "context"
    "os"
    "path"
    "sync"
    "time"

    "github.com/restic/restic/internal/errors"

    "github.com/restic/restic/internal/debug"
    "github.com/restic/restic/internal/restic"
    "golang.org/x/net/webdav"
)

// Config holds settings for the file system served.
type Config struct {
    Host  string
    Tags  []restic.TagList
    Paths []string
}

const snapshotFormat = "2006-01-02_150405"

// RepoFileSystem implements a read-only file system on top of a repository.
type RepoFileSystem struct {
    repo      restic.Repository
    lastCheck time.Time

    entries map[string]webdav.File
    m       sync.Mutex
}

// NewRepoFileSystem returns a new file system for the repo.
func NewRepoFileSystem(ctx context.Context, repo restic.Repository, cfg Config) (*RepoFileSystem, error) {
    snapshots := restic.FindFilteredSnapshots(ctx, repo, cfg.Host, cfg.Tags, cfg.Paths)

    lastcheck := time.Now()

    nodes := make([]*restic.Node, 0, len(snapshots))
    entries := make(map[string]webdav.File)

    for _, sn := range snapshots {
        name := sn.Time.Format(snapshotFormat)
        snFileInfo := virtFileInfo{
            name:    name,
            size:    0,
            mode:    0755 | os.ModeDir,
            modtime: sn.Time,
            isdir:   true,
        }

        if sn.Tree == nil {
            return nil, errors.Errorf("snapshot %v has nil tree", sn.ID().Str())
        }

        tree, err := repo.LoadTree(ctx, *sn.Tree)
        if err != nil {
            return nil, err
        }

        p := path.Join("/", name)
        entries[p] = &RepoDir{
            fi:    snFileInfo,
            nodes: tree.Nodes,
        }

        nodes = append(nodes, &restic.Node{
            Name: name,
            Type: "dir",
        })
    }

    entries["/"] = &RepoDir{
        nodes: nodes,
        fi: virtFileInfo{
            name:    "root",
            size:    0,
            mode:    0755 | os.ModeDir,
            modtime: lastcheck,
            isdir:   true,
        },
    }

    fs := &RepoFileSystem{
        repo:      repo,
        lastCheck: lastcheck,
        entries:   entries,
    }

    return fs, nil
}

// statically ensure that RepoFileSystem implements webdav.FileSystem
var _ webdav.FileSystem = &RepoFileSystem{}

// Mkdir creates a new directory, it is not available for RepoFileSystem.
func (fs *RepoFileSystem) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
    return webdav.ErrForbidden
}

func (fs *RepoFileSystem) loadPath(ctx context.Context, name string) error {
    debug.Log("%v", name)

    fs.m.Lock()
    _, ok := fs.entries[name]
    fs.m.Unlock()
    if ok {
        return nil
    }

    dirname := path.Dir(name)
    if dirname == "/" {
        return nil
    }

    err := fs.loadPath(ctx, dirname)
    if err != nil {
        return err
    }

    entry, ok := fs.entries[dirname]
    if !ok {
        // loadPath did not succeed
        return nil
    }

    repodir, ok := entry.(*RepoDir)
    if !ok {
        return nil
    }

    filename := path.Base(name)
    for _, node := range repodir.nodes {
        if node.Name != filename {
            continue
        }

        debug.Log("found item %v :%v", filename, node)

        switch node.Type {
        case "dir":
            if node.Subtree == nil {
                return errors.Errorf("tree %v has nil tree", dirname)
            }

            tree, err := fs.repo.LoadTree(ctx, *node.Subtree)
            if err != nil {
                return err
            }

            newEntry := &RepoDir{
                fi:    fileInfoFromNode(node),
                nodes: tree.Nodes,
            }

            fs.m.Lock()
            fs.entries[name] = newEntry
            fs.m.Unlock()
        case "file":
            newEntry := &RepoFile{
                fi:   fileInfoFromNode(node),
                node: node,
            }
            fs.m.Lock()
            fs.entries[name] = newEntry
            fs.m.Unlock()
        }

        return nil
    }

    return nil
}

// OpenFile opens a file for reading.
func (fs *RepoFileSystem) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
    name = path.Clean(name)
    debug.Log("%v", name)
    if flag != os.O_RDONLY {
        return nil, webdav.ErrForbidden
    }

    err := fs.loadPath(ctx, name)
    if err != nil {
        return nil, err
    }

    fs.m.Lock()
    entry, ok := fs.entries[name]
    fs.m.Unlock()
    if !ok {
        return nil, os.ErrNotExist
    }

    return entry, nil
}

// RemoveAll recursively removes files and directories, it is not available for RepoFileSystem.
func (fs *RepoFileSystem) RemoveAll(ctx context.Context, name string) error {
    debug.Log("%v", name)
    return webdav.ErrForbidden
}

// Rename renames files or directories, it is not available for RepoFileSystem.
func (fs *RepoFileSystem) Rename(ctx context.Context, oldName, newName string) error {
    debug.Log("%v -> %v", oldName, newName)
    return webdav.ErrForbidden
}

// Stat returns information on a file or directory.
func (fs *RepoFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) {
    name = path.Clean(name)
    err := fs.loadPath(ctx, name)
    if err != nil {
        return nil, err
    }

    fs.m.Lock()
    entry, ok := fs.entries[name]
    fs.m.Unlock()
    if !ok {
        debug.Log("%v not found", name)
        return nil, os.ErrNotExist
    }

    fi, err := entry.Stat()
    debug.Log("%v %v", name, fi)
    return fi, err
}
internal/serve/webdav.go (new file)
@@ -0,0 +1,46 @@
package serve

import (
    "context"
    "log"
    "net/http"
    "os"

    "github.com/restic/restic/internal/restic"
    "golang.org/x/net/webdav"
)

// WebDAV implements a WebDAV handler on the repo.
type WebDAV struct {
    restic.Repository
    webdav.Handler
}

var logger = log.New(os.Stderr, "webdav log: ", log.Flags())

func logRequest(req *http.Request, err error) {
    logger.Printf("req %v %v -> %v\n", req.Method, req.URL.Path, err)
}

// NewWebDAV returns a new *WebDAV which allows serving the repo via WebDAV.
func NewWebDAV(ctx context.Context, repo restic.Repository, cfg Config) (*WebDAV, error) {
    fs, err := NewRepoFileSystem(ctx, repo, cfg)
    if err != nil {
        return nil, err
    }

    wd := &WebDAV{
        Repository: repo,
        Handler: webdav.Handler{
            FileSystem: fs,
            LockSystem: webdav.NewMemLS(),
            Logger:     logRequest,
        },
    }
    return wd, nil
}

func (srv *WebDAV) ServeHTTP(res http.ResponseWriter, req *http.Request) {
    logger.Printf("handle %v %v\n", req.Method, req.URL.Path)
    srv.Handler.ServeHTTP(res, req)
}
@@ -13,6 +13,7 @@ import (
    "regexp"
    "sort"
    "strings"
+   "time"

    "github.com/spf13/pflag"
)
@@ -23,6 +24,7 @@ var opts = struct {
    IgnoreBranchName         bool
    IgnoreUncommittedChanges bool
    IgnoreChangelogVersion   bool
+   IgnoreChangelogRelease   bool
    IgnoreChangelogCurrent   bool

    tarFilename string
@@ -35,6 +37,7 @@ func init() {
    pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'")
    pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes")
    pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md")
+   pflag.BoolVar(&opts.IgnoreChangelogRelease, "ignore-changelog-releases", false, "ignore missing entry changelog/releases")
    pflag.BoolVar(&opts.IgnoreChangelogCurrent, "ignore-changelog-current", false, "ignore check if CHANGELOG.md is up to date")
    pflag.Parse()
}
@@ -168,6 +171,35 @@ func preCheckChangelogCurrent() {
    }
}

+func preCheckChangelogRelease() {
+   if opts.IgnoreChangelogRelease {
+       return
+   }
+
+   f, err := os.Open(filepath.FromSlash("changelog/releases"))
+   if err != nil {
+       die("unable to open releases file in changelog/: %v", err)
+   }
+
+   sc := bufio.NewScanner(f)
+   for sc.Scan() {
+       if sc.Err() != nil {
+           die("error reading releases file in changelog: %v", err)
+       }
+
+       if sc.Text() == fmt.Sprintf("%v %v", opts.Version, time.Now().Format("2006-01-02")) {
+           return
+       }
+   }
+
+   err = f.Close()
+   if err != nil {
+       die("close releases error: %v", err)
+   }
+
+   die("unable to find correct line for version %v (released today) in changelog/releases", opts.Version)
+}
+
func preCheckChangelogVersion() {
    if opts.IgnoreChangelogVersion {
        return
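
For reference, the new check expects changelog/releases to contain a line made of the version and today's date, exactly as produced by fmt.Sprintf("%v %v", opts.Version, time.Now().Format("2006-01-02")). With hypothetical values, such a line would look like:

0.8.2 2018-01-15
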
@@ -215,7 +247,7 @@ func generateFiles() {
}

func updateVersion() {
-   err := ioutil.WriteFile("VERSION", []byte(opts.Version), 0644)
+   err := ioutil.WriteFile("VERSION", []byte(opts.Version+"\n"), 0644)
    if err != nil {
        die("unable to write version to file: %v", err)
    }
@@ -306,6 +338,7 @@ func main() {
    preCheckUncommittedChanges()
    preCheckVersionExists()
    preCheckChangelogCurrent()
+   preCheckChangelogRelease()
    preCheckChangelogVersion()

    generateFiles()