Mirror of https://github.com/restic/restic.git, synced 2025-08-22 17:57:48 +00:00

Compare commits: v0.9.4 ... debug-chun (396 commits)
27  .github/ISSUE_TEMPLATE.md  vendored
@@ -1,27 +0,0 @@
<!--

Welcome! If you have a question or are unsure if you should open an issue,
please use the forum instead!

https://forum.restic.net

The forum is a better place for questions about restic or general suggestions
and topics, e.g. usage or documentation questions! This issue tracker is mainly
for tracking bugs and feature requests directly relating to the development of
the software itself, rather than the project.

Thanks for understanding, and for contributing to the project!
-->


Output of `restic version`
--------------------------

<!--
Please add the version of restic you're currently using here, this helps us
later to see what has changed in restic when we revisit this issue after some
time.
-->

Describe the issue
------------------
4  .github/ISSUE_TEMPLATE/Bug.md  vendored
@@ -83,8 +83,8 @@ Do you have an idea how to solve the issue?



Did restic help you or made you happy in any way?
-------------------------------------------------
Did restic help you today? Did it make you happy in any way?
------------------------------------------------------------

<!--
Answering this question is not required, but if you have anything positive to share, please do so here!
4  .github/ISSUE_TEMPLATE/Feature.md  vendored
@@ -47,8 +47,8 @@ This section should contain a brief description what you're trying to do, which
would be possible after implementing the new feature.
-->

Did restic help you or made you happy in any way?
-------------------------------------------------
Did restic help you today? Did it make you happy in any way?
------------------------------------------------------------

<!--
Answering this question is not required, but if you have anything positive to share, please do so here!
4  .github/ISSUE_TEMPLATE/config.yml  vendored  Normal file
@@ -0,0 +1,4 @@
contact_links:
  - name: restic forum
    url: https://forum.restic.net
    about: Please ask questions about using restic here, do not open an issue for questions.
4  .github/PULL_REQUEST_TEMPLATE.md  vendored
@@ -22,12 +22,16 @@ Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.

If this PR resolves an issue on GitHub, use "closes #1234" so that the issue is
closed automatically when this PR is merged.
-->

Checklist
---------

- [ ] I have read the [Contribution Guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches)
- [ ] I have enabled [maintainer edits for this PR](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork)
- [ ] I have added tests for all changes in this PR
- [ ] I have added documentation for the changes (in the manual)
- [ ] There's a new file in `changelog/unreleased/` that describes the changes for our users (template [here](https://github.com/restic/restic/blob/master/changelog/TEMPLATE))
21  .travis.yml
@@ -4,15 +4,23 @@ sudo: false
matrix:
  include:
    - os: linux
      go: "1.9.x"
      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 RESTIC_BUILD_SOLARIS=0
      go: "1.11.x"
      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
      cache:
        directories:
          - $HOME/.cache/go-build
          - $HOME/gopath/pkg/mod

    - os: linux
      go: "1.10.x"
      go: "1.12.x"
      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
      cache:
        directories:
          - $HOME/.cache/go-build
          - $HOME/gopath/pkg/mod

    - os: linux
      go: "1.13.x"
      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
      cache:
        directories:
@@ -21,7 +29,7 @@ matrix:

    # only run fuse and cloud backends tests on Travis for the latest Go on Linux
    - os: linux
      go: "1.11.x"
      go: "1.14.x"
      sudo: true
      cache:
        directories:
@@ -29,7 +37,7 @@ matrix:
          - $HOME/gopath/pkg/mod

    - os: osx
      go: "1.11.x"
      go: "1.14.x"
      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
      cache:
        directories:
@@ -56,6 +64,3 @@ install:

script:
  - go run run_integration_tests.go

after_success:
  - test -r all.cov && bash <(curl -s https://codecov.io/bash) -f all.cov
245  CHANGELOG.md
@@ -1,3 +1,248 @@
Changelog for restic 0.9.6 (2019-11-22)
=======================================

The following sections list the changes in restic 0.9.6 relevant to
restic users. The changes are ordered by importance.

Summary
-------

* Fix #2063: Allow absolute path for filename when backing up from stdin
* Fix #2174: Save files with invalid timestamps
* Fix #2249: Read fresh metadata for unmodified files
* Fix #2301: Add upper bound for t in --read-data-subset=n/t
* Fix #2321: Check errors when loading index files
* Enh #2179: Use ctime when checking for file changes
* Enh #2306: Allow multiple retries for interactive password input
* Enh #2330: Make `--group-by` accept both singular and plural
* Enh #2350: Add option to configure S3 region

Details
-------

* Bugfix #2063: Allow absolute path for filename when backing up from stdin

   When backing up from stdin, handle directory path for `--stdin-filename`. This can be used to
   specify the full path for the backed-up file.

   https://github.com/restic/restic/issues/2063
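   For example, a dump piped into restic can now be stored under a full path
   like this (the database name and target path are only illustrative):

      $ mysqldump somedatabase | restic backup --stdin --stdin-filename /backups/db/somedatabase.sql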

* Bugfix #2174: Save files with invalid timestamps

   When restic reads invalid timestamps (year is before 0000 or after 9999) it refused to read and
   archive the file. We've changed the behavior and will now save modified timestamps with the
   year set to either 0000 or 9999, the rest of the timestamp stays the same, so the file will be saved
   (albeit with a bogus timestamp).

   https://github.com/restic/restic/issues/2174
   https://github.com/restic/restic/issues/1173

* Bugfix #2249: Read fresh metadata for unmodified files

   Restic took all metadata for files which were detected as unmodified, not taking into account
   changed metadata (ownership, mode). This is now corrected.

   https://github.com/restic/restic/issues/2249
   https://github.com/restic/restic/pull/2252

* Bugfix #2301: Add upper bound for t in --read-data-subset=n/t

   256 is the effective maximum for t, but restic would allow larger values, leading to strange
   behavior.

   https://github.com/restic/restic/issues/2301
   https://github.com/restic/restic/pull/2304
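   As an illustration, the n/t syntax splits the repository data into t groups
   and reads group n; assuming the flag is used with the `check` command (the
   command itself is not named above), an invocation could look like:

      $ restic check --read-data-subset=1/5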

* Bugfix #2321: Check errors when loading index files

   Restic now checks and handles errors which occur when loading index files; the missing check
   led to odd errors (and a stack trace printed to users) later. This was reported in the forum.

   https://github.com/restic/restic/pull/2321
   https://forum.restic.net/t/check-rebuild-index-prune/1848/13

* Enhancement #2179: Use ctime when checking for file changes

   Previously, restic only checked a file's mtime (along with other non-timestamp metadata) to
   decide if a file has changed. This could cause restic to not notice that a file has changed (and
   therefore continue to store the old version, as opposed to the modified version) if something
   edits the file and then resets the timestamp. Restic now also checks the ctime of files, so any
   modifications to a file should be noticed, and the modified file will be backed up. The ctime
   check will be disabled if the --ignore-inode flag was given.

   If this change causes problems for you, please open an issue, and we can look into adding a
   separate flag to disable just the ctime check.

   https://github.com/restic/restic/issues/2179
   https://github.com/restic/restic/pull/2212

* Enhancement #2306: Allow multiple retries for interactive password input

   Restic used to quit if the repository password was typed incorrectly once. Restic will now ask
   the user again for the repository password if typed incorrectly. The user will now get three
   tries to input the correct password before restic quits.

   https://github.com/restic/restic/issues/2306

* Enhancement #2330: Make `--group-by` accept both singular and plural

   One can now use the values `host`/`hosts`, `path`/`paths` and `tag`/`tags` interchangeably
   in the `--group-by` argument.

   https://github.com/restic/restic/issues/2330
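   For example, the following two invocations are now equivalent (repository
   settings omitted):

      $ restic snapshots --group-by host,tag
      $ restic snapshots --group-by hosts,tags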

* Enhancement #2350: Add option to configure S3 region

   We've added a new option for setting the region when accessing an S3-compatible service. For
   some providers, it is required to set this to a valid value. You can do that either by setting the
   environment variable `AWS_DEFAULT_REGION` or using the option `s3.region`, e.g. like this:
   `-o s3.region="us-east-1"`.

   https://github.com/restic/restic/pull/2350
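   Assuming the repository lives on an S3-compatible service, either of these
   forms sets the region (the region value and backup path are placeholders):

      $ export AWS_DEFAULT_REGION=us-east-1
      $ restic backup /srv/data

      $ restic -o s3.region="us-east-1" backup /srv/data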


Changelog for restic 0.9.5 (2019-04-23)
=======================================

The following sections list the changes in restic 0.9.5 relevant to
restic users. The changes are ordered by importance.

Summary
-------

* Fix #2135: Return error when no bytes could be read from stdin
* Fix #2181: Don't cancel timeout after 30 seconds for self-update
* Fix #2203: Fix reading passwords from stdin
* Fix #2224: Don't abort the find command when a tree can't be loaded
* Enh #1895: Add case insensitive include & exclude options
* Enh #1937: Support streaming JSON output for backup
* Enh #2155: Add Openstack application credential auth for Swift
* Enh #2184: Add --json support to forget command
* Enh #2037: Add group-by option to snapshots command
* Enh #2124: Ability to dump folders to tar via stdout
* Enh #2139: Return error if no bytes could be read for `backup --stdin`
* Enh #2205: Add --ignore-inode option to backup cmd
* Enh #2220: Add config option to set S3 storage class

Details
-------

* Bugfix #2135: Return error when no bytes could be read from stdin

   We assume that users reading backup data from stdin want to know when no data could be read, so now
   restic returns an error when `backup --stdin` is called but no bytes could be read. Usually,
   this means that an earlier command in a pipe has failed. The documentation was amended and now
   recommends setting the `pipefail` option (`set -o pipefail`).

   https://github.com/restic/restic/pull/2135
   https://github.com/restic/restic/pull/2139
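   The recommended pattern therefore looks roughly like this (the dumped
   database is a placeholder):

      $ set -o pipefail
      $ mysqldump somedatabase | restic backup --stdin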

* Bugfix #2181: Don't cancel timeout after 30 seconds for self-update

   https://github.com/restic/restic/issues/2181

* Bugfix #2203: Fix reading passwords from stdin

   Passwords for the `init`, `key add`, and `key passwd` commands can now be read from
   non-terminal stdin.

   https://github.com/restic/restic/issues/2203

* Bugfix #2224: Don't abort the find command when a tree can't be loaded

   Change the find command so that missing trees don't result in a crash. Instead, the error is
   logged to the debug log, and the tree ID is displayed along with the snapshot it belongs to. This
   makes it possible to recover repositories that are missing trees by forgetting the snapshots
   they are used in.

   https://github.com/restic/restic/issues/2224

* Enhancement #1895: Add case insensitive include & exclude options

   The backup and restore commands now have --iexclude and --iinclude flags as case insensitive
   variants of --exclude and --include.

   https://github.com/restic/restic/issues/1895
   https://github.com/restic/restic/pull/2032
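   For instance, the following skips `*.tmp` files regardless of case, so a
   `report.TMP` is excluded as well (the path and pattern are illustrative):

      $ restic backup --iexclude '*.tmp' ~/work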

* Enhancement #1937: Support streaming JSON output for backup

   We've added support for getting machine-readable status output during backup: just pass the
   flag `--json` to `restic backup` and restic will output a stream of JSON objects which contain
   the current progress.

   https://github.com/restic/restic/issues/1937
   https://github.com/restic/restic/pull/1944
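   A minimal invocation looks like this; each line of output is a JSON object
   describing the current progress (the path is a placeholder):

      $ restic backup --json ~/work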

* Enhancement #2155: Add Openstack application credential auth for Swift

   Since the Openstack Queens release, the Identity (auth V3) service supports an application
   credential auth method, which allows creating a technical account with limited roles. This
   commit adds an application credential authentication method for the Swift backend.

   https://github.com/restic/restic/issues/2155

* Enhancement #2184: Add --json support to forget command

   The forget command now supports the --json argument, outputting information about what is
   (or would be) kept and removed from the repository.

   https://github.com/restic/restic/issues/2184
   https://github.com/restic/restic/pull/2185
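   For example, combined with a retention policy (the `--keep-last` flag here
   is just one of the usual forget policies, shown for illustration):

      $ restic forget --json --keep-last 5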

* Enhancement #2037: Add group-by option to snapshots command

   We have added an option to group the output of the snapshots command, similar to the output of the
   forget command. The option is called "--group-by" and accepts any combination of the
   values "host", "paths" and "tags", separated by commas. Default behavior (not specifying
   --group-by) has not been changed. We have also added support for the grouping to the JSON output.

   https://github.com/restic/restic/issues/2037
   https://github.com/restic/restic/pull/2087
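   For example, to group the listing by host and path (adding `--json` keeps
   the same grouping in the JSON output):

      $ restic snapshots --group-by host,paths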

* Enhancement #2124: Ability to dump folders to tar via stdout

   We've added the ability to dump whole folders to stdout via the `dump` command. Restic now
   requires at least Go 1.10 due to a limitation of the standard library for Go <= 1.9.

   https://github.com/restic/restic/issues/2123
   https://github.com/restic/restic/pull/2124
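   A sketch of the resulting workflow, assuming the usual `dump` syntax of a
   snapshot ID followed by a path (both values are placeholders); the folder is
   written to stdout as a tar archive:

      $ restic dump latest /home/user/work > work.tar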

* Enhancement #2139: Return error if no bytes could be read for `backup --stdin`

   When restic is used to back up the output of a program, like `mysqldump | restic backup --stdin`,
   it now returns an error if no bytes could be read at all. This catches the failure case when
   `mysqldump` failed for some reason and did not output any data to stdout.

   https://github.com/restic/restic/pull/2139

* Enhancement #2205: Add --ignore-inode option to backup cmd

   This option handles backup of virtual filesystems that do not keep fixed inodes for files, such
   as FUSE-based filesystems or pCloud. Ignoring inode changes allows the file to be considered
   unchanged if its last modification date and size are unchanged.

   https://github.com/restic/restic/issues/1631
   https://github.com/restic/restic/pull/2205
   https://github.com/restic/restic/pull/2047
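   For example, when backing up a FUSE-based mount whose inode numbers change
   between mounts (the mount point is a placeholder):

      $ restic backup --ignore-inode /mnt/pcloud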

* Enhancement #2220: Add config option to set S3 storage class

   The `s3.storage-class` option can be passed to restic (using `-o`) to specify the storage
   class to be used for S3 objects created by restic.

   The storage class is passed as-is to S3, so it needs to be understood by the API. On AWS, it can be
   one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING` and
   `REDUCED_REDUNDANCY`. If unspecified, the default storage class is used (`STANDARD` on
   AWS).

   You can mix storage classes in the same bucket, and the setting isn't stored in the restic
   repository, so be sure to specify it with each command that writes to S3.

   https://github.com/restic/restic/issues/706
   https://github.com/restic/restic/pull/2220
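   Because the setting is not stored in the repository, it has to accompany
   every command that writes to S3, for example (the backup path is a
   placeholder):

      $ restic -o s3.storage-class=STANDARD_IA backup /srv/data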


Changelog for restic 0.9.4 (2019-01-06)
=======================================

@@ -46,15 +46,12 @@ Remember, the easier it is for us to reproduce the bug, the earlier it will be
corrected!

In addition, you can compile restic with debug support by running
`go run -mod=vendor build.go -tags debug` and instructing it to create a debug
`go run build.go -tags debug` and instructing it to create a debug
log by setting the environment variable `DEBUG_LOG` to a file, e.g. like this:

    $ export DEBUG_LOG=/tmp/restic-debug.log
    $ restic backup ~/work

For Go < 1.11, you need to remove the `-mod=vendor` option from the build
command.

Please be aware that the debug log file will contain potentially sensitive
things like file and directory names, so please either redact it before
uploading it somewhere or post only the parts that are really relevant.
@@ -133,8 +130,7 @@ down to the following steps:

2. Clone the repository locally and create a new branch. If you are working on
   the code itself, please set up the development environment as described in
   the previous section. Especially take care to place your forked repository
   at the correct path (`src/github.com/restic/restic`) within your `GOPATH`.
   the previous section.

3. Then commit your changes as fine grained as possible, as smaller patches,
   that handle one and only one issue are easier to discuss and merge.
@@ -142,7 +138,7 @@ down to the following steps:
4. Push the new branch with your changes to your fork of the repository.

5. Create a pull request by visiting the GitHub website, it will guide you
   through the process.
   through the process. Please [allow edits from maintainers](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork).

6. You will receive comments on your code and the feature or bug that they
   address. Maybe you need to rework some minor things, in this case push new
@@ -150,11 +146,14 @@ down to the following steps:
   existing commit, use common sense to decide which is better), they will be
   automatically added to the pull request.

7. If your pull request changes anything that users should be aware of (a
   bugfix, a new feature, ...) please add an entry to the file
   ['CHANGELOG.md'](CHANGELOG.md). It will be used in the announcement of the
   next stable release. While writing, ask yourself: If I were the user, what
   would I need to be aware of with this change.
7. If your pull request changes anything that users should be aware
   of (a bugfix, a new feature, ...) please add an entry as a new
   file in `changelog/unreleased` including the issue number in the
   filename (e.g. `issue-8756`). Use the template in
   `changelog/TEMPLATE` for the content. It will be used in the
   announcement of the next stable release. While writing, ask
   yourself: If I were the user, what would I need to be aware of
   with this change.

8. Once your code looks good and passes all the tests, we'll merge it. Thanks
   a lot for your contribution!

2  Makefile
@@ -3,7 +3,7 @@
all: restic

restic:
        go run -mod=vendor build.go || go run build.go
        go run build.go

clean:
        rm -f restic
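For reference, the same build script accepts the cross-compilation and output
flags listed in build.go's usage text further below; a sketch of a Windows
cross-build (the target values are illustrative) would be:

    go run build.go --goos windows --goarch amd64 -o restic.exe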
@@ -1,4 +1,4 @@
|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound|
|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |Reviewed by Hound|

Introduction
------------
@@ -129,8 +129,6 @@ Storage are sponsored by `AppsCode <https://appscode.com>`__!
   :target: https://goreportcard.com/report/github.com/restic/restic
.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg
   :target: https://saythanks.io/to/restic
.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/restic/restic
.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png
   :target: https://appscode.com
.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg
@@ -20,8 +20,8 @@ init:

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://dl.google.com/go/go1.11.windows-amd64.msi
  - msiexec /i go1.11.windows-amd64.msi /q
  - appveyor DownloadFile https://dl.google.com/go/go1.14.windows-amd64.msi
  - msiexec /i go1.14.windows-amd64.msi /q
  - go version
  - go env
  - appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
@@ -29,4 +29,4 @@ install:
  - set PATH=bin/;%PATH%

build_script:
  - go run -mod=vendor run_integration_tests.go
  - go run run_integration_tests.go
280  build.go
@@ -3,22 +3,15 @@
// This program aims to make building Go programs for end users easier by just
// calling it with `go run`, without having to setup a GOPATH.
//
// For Go < 1.11, it'll create a new GOPATH in a temporary directory, then run
// `go build` on the package configured as Main in the Config struct.
//
// For Go >= 1.11 if the file go.mod is present, it'll use Go modules and not
// setup a GOPATH. It builds the package configured as Main in the Config
// struct with `go build -mod=vendor` to use the vendored dependencies.
// The variable GOPROXY is set to `off` so that no network calls are made. All
// files are copied to a temporary directory before `go build` is called within
// that directory.
// This program needs Go >= 1.11. It'll use Go modules for compilation. It
// builds the package configured as Main in the Config struct.

// BSD 2-Clause License
//
// Copyright (c) 2016-2018, Alexander Neumann <alexander@bumpern.de>
// All rights reserved.
//
// This file has been copied from the repository at:
// This file has been derived from the repository at:
// https://github.com/fd0/build-go
//
// Redistribution and use in source and binary forms, with or without
@@ -60,12 +53,12 @@ import (
|
||||
|
||||
// config contains the configuration for the program to build.
|
||||
var config = Config{
|
||||
Name: "restic", // name of the program executable and directory
|
||||
Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar"
|
||||
Main: "./cmd/restic", // package name for the main package
|
||||
DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used
|
||||
Tests: []string{"./..."}, // tests to run
|
||||
MinVersion: GoVersion{Major: 1, Minor: 9, Patch: 0}, // minimum Go version supported
|
||||
Name: "restic", // name of the program executable and directory
|
||||
Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar"
|
||||
Main: "./cmd/restic", // package name for the main package
|
||||
DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used
|
||||
Tests: []string{"./..."}, // tests to run
|
||||
MinVersion: GoVersion{Major: 1, Minor: 11, Patch: 0}, // minimum Go version supported
|
||||
}
|
||||
|
||||
// Config configures the build.
|
||||
@@ -79,125 +72,13 @@ type Config struct {
|
||||
}
|
||||
|
||||
var (
|
||||
verbose bool
|
||||
keepGopath bool
|
||||
runTests bool
|
||||
enableCGO bool
|
||||
enablePIE bool
|
||||
goVersion = ParseGoVersion(runtime.Version())
|
||||
verbose bool
|
||||
runTests bool
|
||||
enableCGO bool
|
||||
enablePIE bool
|
||||
goVersion = ParseGoVersion(runtime.Version())
|
||||
)
|
||||
|
||||
// copy all Go files in src to dst, creating directories on the fly, so calling
|
||||
//
|
||||
// copy("/tmp/gopath/src/github.com/restic/restic", "/home/u/restic")
|
||||
//
|
||||
// with "/home/u/restic" containing the file "foo.go" yields the following tree
|
||||
// at "/tmp/gopath":
|
||||
//
|
||||
// /tmp/gopath
|
||||
// └── src
|
||||
// └── github.com
|
||||
// └── restic
|
||||
// └── restic
|
||||
// └── foo.go
|
||||
func copy(dst, src string) error {
|
||||
verbosePrintf("copy contents of %v to %v\n", src, dst)
|
||||
return filepath.Walk(src, func(name string, fi os.FileInfo, err error) error {
|
||||
if name == src {
|
||||
return err
|
||||
}
|
||||
|
||||
if name == ".git" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
intermediatePath, err := filepath.Rel(src, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileSrc := filepath.Join(src, intermediatePath)
|
||||
fileDst := filepath.Join(dst, intermediatePath)
|
||||
|
||||
return copyFile(fileDst, fileSrc)
|
||||
})
|
||||
}
|
||||
|
||||
func directoryExists(dirname string) bool {
|
||||
stat, err := os.Stat(dirname)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
return stat.IsDir()
|
||||
}
|
||||
|
||||
func fileExists(filename string) bool {
|
||||
stat, err := os.Stat(filename)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
return stat.Mode().IsRegular()
|
||||
}
|
||||
|
||||
// copyFile creates dst from src, preserving file attributes and timestamps.
|
||||
func copyFile(dst, src string) error {
|
||||
fi, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fsrc, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
|
||||
fmt.Printf("MkdirAll(%v)\n", filepath.Dir(dst))
|
||||
return err
|
||||
}
|
||||
|
||||
fdst, err := os.Create(dst)
|
||||
if err != nil {
|
||||
_ = fsrc.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(fdst, fsrc)
|
||||
if err != nil {
|
||||
_ = fsrc.Close()
|
||||
_ = fdst.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
err = fdst.Close()
|
||||
if err != nil {
|
||||
_ = fsrc.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
err = fsrc.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.Chmod(dst, fi.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Chtimes(dst, fi.ModTime(), fi.ModTime())
|
||||
}
|
||||
|
||||
// die prints the message with fmt.Fprintf() to stderr and exits with an error
|
||||
// code.
|
||||
func die(message string, args ...interface{}) {
|
||||
@@ -211,7 +92,6 @@ func showUsage(output io.Writer) {
|
||||
fmt.Fprintf(output, "OPTIONS:\n")
|
||||
fmt.Fprintf(output, " -v --verbose output more messages\n")
|
||||
fmt.Fprintf(output, " -t --tags specify additional build tags\n")
|
||||
fmt.Fprintf(output, " -k --keep-tempdir do not remove the temporary directory after build\n")
|
||||
fmt.Fprintf(output, " -T --test run tests\n")
|
||||
fmt.Fprintf(output, " -o --output set output file name\n")
|
||||
fmt.Fprintf(output, " --enable-cgo use CGO to link against libc\n")
|
||||
@@ -219,7 +99,6 @@ func showUsage(output io.Writer) {
|
||||
fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
|
||||
fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
|
||||
fmt.Fprintf(output, " --goarm value set GOARM for cross-compilation\n")
|
||||
fmt.Fprintf(output, " --tempdir dir use a specific directory for compilation\n")
|
||||
}
|
||||
|
||||
func verbosePrintf(message string, args ...interface{}) {
|
||||
@@ -230,49 +109,39 @@ func verbosePrintf(message string, args ...interface{}) {
|
||||
fmt.Printf("build: "+message, args...)
|
||||
}
|
||||
|
||||
// cleanEnv returns a clean environment with GOPATH, GOBIN and GO111MODULE
|
||||
// removed (if present).
|
||||
func cleanEnv() (env []string) {
|
||||
removeKeys := map[string]struct{}{
|
||||
"GOPATH": struct{}{},
|
||||
"GOBIN": struct{}{},
|
||||
"GO111MODULE": struct{}{},
|
||||
}
|
||||
|
||||
for _, v := range os.Environ() {
|
||||
data := strings.SplitN(v, "=", 2)
|
||||
name := data[0]
|
||||
|
||||
if _, ok := removeKeys[name]; ok {
|
||||
// printEnv prints Go-relevant environment variables in a nice way using verbosePrintf.
|
||||
func printEnv(env []string) {
|
||||
verbosePrintf("environment (GO*):\n")
|
||||
for _, v := range env {
|
||||
// ignore environment variables which do not start with GO*.
|
||||
if !strings.HasPrefix(v, "GO") {
|
||||
continue
|
||||
}
|
||||
|
||||
env = append(env, v)
|
||||
verbosePrintf(" %s\n", v)
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
// build runs "go build args..." with GOPATH set to gopath.
|
||||
func build(cwd string, env map[string]string, args ...string) error {
|
||||
a := []string{"build"}
|
||||
|
||||
if goVersion.AtLeast(GoVersion{1, 10, 0}) {
|
||||
verbosePrintf("Go version is at least 1.10, using new syntax for -gcflags\n")
|
||||
// use new prefix
|
||||
// try to remove all absolute paths from resulting binary
|
||||
if goVersion.AtLeast(GoVersion{1, 13, 0}) {
|
||||
// use the new flag introduced by Go 1.13
|
||||
a = append(a, "-trimpath")
|
||||
} else {
|
||||
// otherwise try to trim as many paths as possible
|
||||
a = append(a, "-asmflags", fmt.Sprintf("all=-trimpath=%s", cwd))
|
||||
a = append(a, "-gcflags", fmt.Sprintf("all=-trimpath=%s", cwd))
|
||||
} else {
|
||||
a = append(a, "-asmflags", fmt.Sprintf("-trimpath=%s", cwd))
|
||||
a = append(a, "-gcflags", fmt.Sprintf("-trimpath=%s", cwd))
|
||||
}
|
||||
|
||||
if enablePIE {
|
||||
a = append(a, "-buildmode=pie")
|
||||
}
|
||||
|
||||
a = append(a, args...)
|
||||
cmd := exec.Command("go", a...)
|
||||
cmd.Env = append(cleanEnv(), "GOPROXY=off")
|
||||
cmd.Env = os.Environ()
|
||||
for k, v := range env {
|
||||
cmd.Env = append(cmd.Env, k+"="+v)
|
||||
}
|
||||
@@ -280,6 +149,8 @@ func build(cwd string, env map[string]string, args ...string) error {
|
||||
cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
|
||||
}
|
||||
|
||||
printEnv(cmd.Env)
|
||||
|
||||
cmd.Dir = cwd
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
@@ -294,7 +165,7 @@ func build(cwd string, env map[string]string, args ...string) error {
|
||||
func test(cwd string, env map[string]string, args ...string) error {
|
||||
args = append([]string{"test", "-count", "1"}, args...)
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Env = append(cleanEnv(), "GOPROXY=off")
|
||||
cmd.Env = os.Environ()
|
||||
for k, v := range env {
|
||||
cmd.Env = append(cmd.Env, k+"="+v)
|
||||
}
|
||||
@@ -305,6 +176,8 @@ func test(cwd string, env map[string]string, args ...string) error {
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
printEnv(cmd.Env)
|
||||
|
||||
verbosePrintf("chdir %q\n", cwd)
|
||||
verbosePrintf("go %q\n", args)
|
||||
|
||||
@@ -454,6 +327,10 @@ func (v GoVersion) String() string {
|
||||
}
|
||||
|
||||
func main() {
|
||||
if !goVersion.AtLeast(GoVersion{1, 11, 0}) {
|
||||
die("Go version (%v) is too old, Go <= 1.11 does not support Go Modules\n", goVersion)
|
||||
}
|
||||
|
||||
if !goVersion.AtLeast(config.MinVersion) {
|
||||
fmt.Fprintf(os.Stderr, "%s detected, this program requires at least %s\n", goVersion, config.MinVersion)
|
||||
os.Exit(1)
|
||||
@@ -464,15 +341,13 @@ func main() {
|
||||
skipNext := false
|
||||
params := os.Args[1:]
|
||||
|
||||
goEnv := map[string]string{}
|
||||
buildEnv := map[string]string{
|
||||
"GOOS": runtime.GOOS,
|
||||
"GOARCH": runtime.GOARCH,
|
||||
"GOARM": "",
|
||||
env := map[string]string{
|
||||
"GO111MODULE": "on", // make sure we build in Module mode
|
||||
"GOOS": runtime.GOOS,
|
||||
"GOARCH": runtime.GOARCH,
|
||||
"GOARM": "",
|
||||
}
|
||||
|
||||
tempdir := ""
|
||||
|
||||
var outputFilename string
|
||||
|
||||
for i, arg := range params {
|
||||
@@ -484,8 +359,6 @@ func main() {
|
||||
switch arg {
|
||||
case "-v", "--verbose":
|
||||
verbose = true
|
||||
case "-k", "--keep-gopath":
|
||||
keepGopath = true
|
||||
case "-t", "-tags", "--tags":
|
||||
if i+1 >= len(params) {
|
||||
die("-t given but no tag specified")
|
||||
@@ -495,9 +368,6 @@ func main() {
|
||||
case "-o", "--output":
|
||||
skipNext = true
|
||||
outputFilename = params[i+1]
|
||||
case "--tempdir":
|
||||
skipNext = true
|
||||
tempdir = params[i+1]
|
||||
case "-T", "--test":
|
||||
runTests = true
|
||||
case "--enable-cgo":
|
||||
@@ -506,13 +376,13 @@ func main() {
|
||||
enablePIE = true
|
||||
case "--goos":
|
||||
skipNext = true
|
||||
buildEnv["GOOS"] = params[i+1]
|
||||
env["GOOS"] = params[i+1]
|
||||
case "--goarch":
|
||||
skipNext = true
|
||||
buildEnv["GOARCH"] = params[i+1]
|
||||
env["GOARCH"] = params[i+1]
|
||||
case "--goarm":
|
||||
skipNext = true
|
||||
buildEnv["GOARM"] = params[i+1]
|
||||
env["GOARM"] = params[i+1]
|
||||
case "-h":
|
||||
showUsage(os.Stdout)
|
||||
return
|
||||
@@ -525,8 +395,12 @@ func main() {
|
||||
|
||||
verbosePrintf("detected Go version %v\n", goVersion)
|
||||
|
||||
preserveSymbols := false
|
||||
for i := range buildTags {
|
||||
buildTags[i] = strings.TrimSpace(buildTags[i])
|
||||
if buildTags[i] == "debug" || buildTags[i] == "profile" {
|
||||
preserveSymbols = true
|
||||
}
|
||||
}
|
||||
|
||||
verbosePrintf("build tags: %s\n", buildTags)
|
||||
@@ -538,7 +412,7 @@ func main() {
|
||||
|
||||
if outputFilename == "" {
|
||||
outputFilename = config.Name
|
||||
if buildEnv["GOOS"] == "windows" {
|
||||
if env["GOOS"] == "windows" {
|
||||
outputFilename += ".exe"
|
||||
}
|
||||
}
|
||||
@@ -553,7 +427,11 @@ func main() {
|
||||
if version != "" {
|
||||
constants["main.version"] = version
|
||||
}
|
||||
ldflags := "-s -w " + constants.LDFlags()
|
||||
ldflags := constants.LDFlags()
|
||||
if !preserveSymbols {
|
||||
// Strip debug symbols.
|
||||
ldflags = "-s -w " + ldflags
|
||||
}
|
||||
verbosePrintf("ldflags: %s\n", ldflags)
|
||||
|
||||
var (
|
||||
@@ -567,54 +445,18 @@ func main() {
|
||||
}
|
||||
|
||||
buildTarget := filepath.FromSlash(mainPackage)
|
||||
buildCWD := ""
|
||||
|
||||
if goVersion.AtLeast(GoVersion{1, 11, 0}) && fileExists("go.mod") {
|
||||
verbosePrintf("Go >= 1.11 and 'go.mod' found, building with modules\n")
|
||||
buildCWD = root
|
||||
|
||||
buildArgs = append(buildArgs, "-mod=vendor")
|
||||
testArgs = append(testArgs, "-mod=vendor")
|
||||
} else {
|
||||
if tempdir == "" {
|
||||
tempdir, err = ioutil.TempDir("", fmt.Sprintf("%v-build-", config.Name))
|
||||
if err != nil {
|
||||
die("TempDir(): %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
verbosePrintf("Go < 1.11 or 'go.mod' not found, create GOPATH at %v\n", tempdir)
|
||||
targetdir := filepath.Join(tempdir, "src", filepath.FromSlash(config.Namespace))
|
||||
if err = copy(targetdir, root); err != nil {
|
||||
die("copying files from %v to %v/src failed: %v\n", root, tempdir, err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if !keepGopath {
|
||||
verbosePrintf("remove %v\n", tempdir)
|
||||
if err = os.RemoveAll(tempdir); err != nil {
|
||||
die("remove GOPATH at %s failed: %v\n", tempdir, err)
|
||||
}
|
||||
} else {
|
||||
verbosePrintf("leaving temporary GOPATH at %v\n", tempdir)
|
||||
}
|
||||
}()
|
||||
|
||||
buildCWD = targetdir
|
||||
|
||||
goEnv["GOPATH"] = tempdir
|
||||
buildEnv["GOPATH"] = tempdir
|
||||
buildCWD, err := os.Getwd()
|
||||
if err != nil {
|
||||
die("unable to determine current working directory: %v\n", err)
|
||||
}
|
||||
|
||||
verbosePrintf("environment:\n go: %v\n build: %v\n", goEnv, buildEnv)
|
||||
|
||||
buildArgs = append(buildArgs,
|
||||
"-tags", strings.Join(buildTags, " "),
|
||||
"-ldflags", ldflags,
|
||||
"-o", output, buildTarget,
|
||||
)
|
||||
|
||||
err = build(buildCWD, buildEnv, buildArgs...)
|
||||
err = build(buildCWD, env, buildArgs...)
|
||||
if err != nil {
|
||||
die("build failed: %v\n", err)
|
||||
}
|
||||
@@ -624,7 +466,7 @@ func main() {
|
||||
|
||||
testArgs = append(testArgs, config.Tests...)
|
||||
|
||||
err = test(buildCWD, goEnv, testArgs...)
|
||||
err = test(buildCWD, env, testArgs...)
|
||||
if err != nil {
|
||||
die("running tests failed: %v\n", err)
|
||||
}
|
||||
|
7
changelog/0.9.5_2019-04-23/issue-1895
Normal file
7
changelog/0.9.5_2019-04-23/issue-1895
Normal file
@@ -0,0 +1,7 @@
|
||||
Enhancement: Add case insensitive include & exclude options
|
||||
|
||||
The backup and restore commands now have --iexclude and --iinclude flags
|
||||
as case insensitive variants of --exclude and --include.
|
||||
|
||||
https://github.com/restic/restic/issues/1895
|
||||
https://github.com/restic/restic/pull/2032
|
8
changelog/0.9.5_2019-04-23/issue-1937
Normal file
8
changelog/0.9.5_2019-04-23/issue-1937
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Support streaming JSON output for backup
|
||||
|
||||
We've added support for getting machine-readable status output during backup,
|
||||
just pass the flag `--json` for `restic backup` and restic will output a stream
|
||||
of JSON objects which contain the current progress.
|
||||
|
||||
https://github.com/restic/restic/issues/1937
|
||||
https://github.com/restic/restic/pull/1944
|
10
changelog/0.9.5_2019-04-23/issue-2135
Normal file
10
changelog/0.9.5_2019-04-23/issue-2135
Normal file
@@ -0,0 +1,10 @@
|
||||
Bugfix: Return error when no bytes could be read from stdin
|
||||
|
||||
We assume that users reading backup data from stdin want to know when no data
|
||||
could be read, so now restic returns an error when `backup --stdin` is called
|
||||
but no bytes could be read. Usually, this means that an earlier command in a
|
||||
pipe has failed. The documentation was amended and now recommends setting the
|
||||
`pipefail` option (`set -o pipefail`).
|
||||
|
||||
https://github.com/restic/restic/pull/2135
|
||||
https://github.com/restic/restic/pull/2139
|
8
changelog/0.9.5_2019-04-23/issue-2155
Normal file
8
changelog/0.9.5_2019-04-23/issue-2155
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: add Openstack application credential auth for Swift
|
||||
|
||||
Since the Openstack Queens release, the Identity (auth V3) service supports an
application credential auth method, which allows creating a technical account
with limited roles. This commit adds an application credential authentication
method for the Swift backend.
|
||||
|
||||
https://github.com/restic/restic/issues/2155
|
3
changelog/0.9.5_2019-04-23/issue-2181
Normal file
3
changelog/0.9.5_2019-04-23/issue-2181
Normal file
@@ -0,0 +1,3 @@
|
||||
Bugfix: Don't cancel timeout after 30 seconds for self-update
|
||||
|
||||
https://github.com/restic/restic/issues/2181
|
8
changelog/0.9.5_2019-04-23/issue-2184
Normal file
8
changelog/0.9.5_2019-04-23/issue-2184
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Add --json support to forget command
|
||||
|
||||
The forget command now supports the --json argument, outputting information
about what is (or would be) kept and removed from the repository.
|
||||
|
||||
https://github.com/restic/restic/issues/2184
|
||||
https://github.com/restic/restic/pull/2185
|
6
changelog/0.9.5_2019-04-23/issue-2203
Normal file
6
changelog/0.9.5_2019-04-23/issue-2203
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Fix reading passwords from stdin
|
||||
|
||||
Passwords for the `init`, `key add`, and `key passwd` commands can now be read from
|
||||
non-terminal stdin.
|
||||
|
||||
https://github.com/restic/restic/issues/2203
|
9
changelog/0.9.5_2019-04-23/issue-2224
Normal file
9
changelog/0.9.5_2019-04-23/issue-2224
Normal file
@@ -0,0 +1,9 @@
|
||||
Bugfix: Don't abort the find command when a tree can't be loaded
|
||||
|
||||
Change the find command so that missing trees don't result in a crash.
|
||||
Instead, the error is logged to the debug log, and the tree ID is displayed
|
||||
along with the snapshot it belongs to. This makes it possible to recover
|
||||
repositories that are missing trees by forgetting the snapshots they are used
|
||||
in.
|
||||
|
||||
https://github.com/restic/restic/issues/2224
|
10
changelog/0.9.5_2019-04-23/pull-2087
Normal file
10
changelog/0.9.5_2019-04-23/pull-2087
Normal file
@@ -0,0 +1,10 @@
|
||||
Enhancement: Add group-by option to snapshots command
|
||||
|
||||
We have added an option to group the output of the snapshots command, similar
|
||||
to the output of the forget command. The option has been called "--group-by"
|
||||
and accepts any combination of the values "host", "paths" and "tags", separated
|
||||
by commas. Default behavior (not specifying --group-by) has not been changed.
|
||||
We have added support of the grouping to the JSON output.
|
||||
|
||||
https://github.com/restic/restic/issues/2037
|
||||
https://github.com/restic/restic/pull/2087
|
8
changelog/0.9.5_2019-04-23/pull-2124
Normal file
8
changelog/0.9.5_2019-04-23/pull-2124
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Ability to dump folders to tar via stdout
|
||||
|
||||
We've added the ability to dump whole folders to stdout via the `dump` command.
|
||||
Restic now requires at least Go 1.10 due to a limitation of the standard
|
||||
library for Go <= 1.9.
|
||||
|
||||
https://github.com/restic/restic/pull/2124
|
||||
https://github.com/restic/restic/issues/2123
|
8
changelog/0.9.5_2019-04-23/pull-2139
Normal file
8
changelog/0.9.5_2019-04-23/pull-2139
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Return error if no bytes could be read for `backup --stdin`
|
||||
|
||||
When restic is used to backup the output of a program, like `mysqldump | restic
|
||||
backup --stdin`, it now returns an error if no bytes could be read at all. This
|
||||
catches the failure case when `mysqldump` failed for some reason and did not
|
||||
output any data to stdout.
|
||||
|
||||
https://github.com/restic/restic/pull/2139
|
10
changelog/0.9.5_2019-04-23/pull-2205
Normal file
10
changelog/0.9.5_2019-04-23/pull-2205
Normal file
@@ -0,0 +1,10 @@
|
||||
Enhancement: Add --ignore-inode option to backup cmd
|
||||
|
||||
This option handles backup of virtual filesystems that do not keep fixed
|
||||
inodes for files, such as FUSE-based filesystems or pCloud. Ignoring inode
changes allows the file to be considered unchanged if its last modification
date and size are unchanged.
|
||||
|
||||
https://github.com/restic/restic/pull/2205
|
||||
https://github.com/restic/restic/pull/2047
|
||||
https://github.com/restic/restic/issues/1631
|
16
changelog/0.9.5_2019-04-23/pull-2220
Normal file
16
changelog/0.9.5_2019-04-23/pull-2220
Normal file
@@ -0,0 +1,16 @@
|
||||
Enhancement: Add config option to set S3 storage class
|
||||
|
||||
The `s3.storage-class` option can be passed to restic (using `-o`) to
|
||||
specify the storage class to be used for S3 objects created by restic.
|
||||
|
||||
The storage class is passed as-is to S3, so it needs to be understood by
|
||||
the API. On AWS, it can be one of `STANDARD`, `STANDARD_IA`,
|
||||
`ONEZONE_IA`, `INTELLIGENT_TIERING` and `REDUCED_REDUNDANCY`. If
|
||||
unspecified, the default storage class is used (`STANDARD` on AWS).
|
||||
|
||||
You can mix storage classes in the same bucket, and the setting isn't
|
||||
stored in the restic repository, so be sure to specify it with each
|
||||
command that writes to S3.
|
||||
|
||||
https://github.com/restic/restic/pull/2220
|
||||
https://github.com/restic/restic/issues/706
|
6
changelog/0.9.6_2019-11-22/issue-2063
Normal file
6
changelog/0.9.6_2019-11-22/issue-2063
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Allow absolute path for filename when backing up from stdin
|
||||
|
||||
When backing up from stdin, handle directory path for `--stdin-filename`.
|
||||
This can be used to specify the full path for the backed-up file.
|
||||
|
||||
https://github.com/restic/restic/issues/2063
|
10
changelog/0.9.6_2019-11-22/issue-2174
Normal file
10
changelog/0.9.6_2019-11-22/issue-2174
Normal file
@@ -0,0 +1,10 @@
|
||||
Bugfix: Save files with invalid timestamps
|
||||
|
||||
When restic reads invalid timestamps (year is before 0000 or after 9999) it
|
||||
refused to read and archive the file. We've changed the behavior and will now
|
||||
save modified timestamps with the year set to either 0000 or 9999, the rest of
|
||||
the timestamp stays the same, so the file will be saved (albeit with a bogus
|
||||
timestamp).
|
||||
|
||||
https://github.com/restic/restic/issues/2174
|
||||
https://github.com/restic/restic/issues/1173
|
15
changelog/0.9.6_2019-11-22/issue-2179
Normal file
15
changelog/0.9.6_2019-11-22/issue-2179
Normal file
@@ -0,0 +1,15 @@
|
||||
Enhancement: Use ctime when checking for file changes
|
||||
|
||||
Previously, restic only checked a file's mtime (along with other non-timestamp
|
||||
metadata) to decide if a file has changed. This could cause restic to not notice
|
||||
that a file has changed (and therefore continue to store the old version, as
|
||||
opposed to the modified version) if something edits the file and then resets the
|
||||
timestamp. Restic now also checks the ctime of files, so any modifications to a
|
||||
file should be noticed, and the modified file will be backed up. The ctime check
|
||||
will be disabled if the --ignore-inode flag was given.
|
||||
|
||||
If this change causes problems for you, please open an issue, and we can look into
adding a separate flag to disable just the ctime check.
|
||||
|
||||
https://github.com/restic/restic/issues/2179
|
||||
https://github.com/restic/restic/pull/2212
|
6
changelog/0.9.6_2019-11-22/issue-2249
Normal file
6
changelog/0.9.6_2019-11-22/issue-2249
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Read fresh metadata for unmodified files
|
||||
|
||||
Restic took all metadata for files which were detected as unmodified, not taking into account changed metadata (ownership, mode). This is now corrected.
|
||||
|
||||
https://github.com/restic/restic/issues/2249
|
||||
https://github.com/restic/restic/pull/2252
|
7
changelog/0.9.6_2019-11-22/issue-2301
Normal file
7
changelog/0.9.6_2019-11-22/issue-2301
Normal file
@@ -0,0 +1,7 @@
|
||||
Bugfix: Add upper bound for t in --read-data-subset=n/t
|
||||
|
||||
256 is the effective maximum for t, but restic would allow larger
|
||||
values, leading to strange behavior.
|
||||
|
||||
https://github.com/restic/restic/issues/2301
|
||||
https://github.com/restic/restic/pull/2304
|
7
changelog/0.9.6_2019-11-22/issue-2306
Normal file
7
changelog/0.9.6_2019-11-22/issue-2306
Normal file
@@ -0,0 +1,7 @@
|
||||
Enhancement: Allow multiple retries for interactive password input
|
||||
|
||||
Restic used to quit if the repository password was typed incorrectly once.
|
||||
Restic will now ask the user again for the repository password if it was typed incorrectly.
|
||||
The user will now get three tries to input the correct password before restic quits.
|
||||
|
||||
https://github.com/restic/restic/issues/2306
|
6
changelog/0.9.6_2019-11-22/issue-2330
Normal file
6
changelog/0.9.6_2019-11-22/issue-2330
Normal file
@@ -0,0 +1,6 @@
|
||||
Enhancement: Make `--group-by` accept both singular and plural
|
||||
|
||||
One can now use the values `host`/`hosts`, `path`/`paths` and
|
||||
`tag`/`tags` interchangeably in the `--group-by` argument.
|
||||
|
||||
https://github.com/restic/restic/issues/2330
|
8
changelog/0.9.6_2019-11-22/pull-2321
Normal file
8
changelog/0.9.6_2019-11-22/pull-2321
Normal file
@@ -0,0 +1,8 @@
|
||||
Bugfix: Check errors when loading index files
|
||||
|
||||
Restic now checks and handles errors which occur when loading index files; the
|
||||
missing check led to odd errors (and a stack trace printed to users) later.
|
||||
This was reported in the forum.
|
||||
|
||||
https://github.com/restic/restic/pull/2321
|
||||
https://forum.restic.net/t/check-rebuild-index-prune/1848/13
|
8
changelog/0.9.6_2019-11-22/pull-2350
Normal file
8
changelog/0.9.6_2019-11-22/pull-2350
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Add option to configure S3 region
|
||||
|
||||
We've added a new option for setting the region when accessing an S3-compatible
|
||||
service. For some providers, it is required to set this to a valid value. You
|
||||
can do that either by setting the environment variable `AWS_DEFAULT_REGION` or
|
||||
using the option `s3.region`, e.g. like this: `-o s3.region="us-east-1"`.
|
||||
|
||||
https://github.com/restic/restic/pull/2350
|
@@ -16,7 +16,7 @@ Details
|
||||
{{ range $entry := .Entries }}{{ with $entry }}
|
||||
* {{ .Type }} #{{ .PrimaryID }}: {{ .Title }}
|
||||
{{ range $par := .Paragraphs }}
|
||||
{{ wrap $par 80 3 }}
|
||||
{{ wrapIndent $par 80 3 }}
|
||||
{{ end -}}
|
||||
{{ range $url := .IssueURLs }}
|
||||
{{ $url -}}
|
||||
|
@@ -9,4 +9,4 @@ in case there aren't any issue links) is used as the primary ID.
|
||||
|
||||
https://github.com/restic/restic/issues/1234
|
||||
https://github.com/restic/restic/pull/55555
|
||||
https://forum.restic/.net/foo/bar/baz
|
||||
https://forum.restic.net/foo/bar/baz
|
||||
|
0
changelog/unreleased/.gitkeep
Normal file
0
changelog/unreleased/.gitkeep
Normal file
9
changelog/unreleased/issue-1570
Normal file
9
changelog/unreleased/issue-1570
Normal file
@@ -0,0 +1,9 @@
|
||||
Enhancement: Support specifying multiple host flags for various commands
|
||||
|
||||
Previously commands didn't take more than one `--host` or `-H` argument into account, which could be limiting with e.g.
|
||||
the `forget` command.
|
||||
|
||||
The `dump`, `find`, `forget`, `ls`, `mount`, `restore`, `snapshots`, `stats` and `tag` commands will now take into account
|
||||
multiple `--host` and `-H` flags.
|
||||
|
||||
https://github.com/restic/restic/issues/1570
|
5
changelog/unreleased/issue-2072
Normal file
5
changelog/unreleased/issue-2072
Normal file
@@ -0,0 +1,5 @@
|
||||
Enhancement: Display snapshot date when using `restic find`
|
||||
|
||||
Added the respective snapshot date to the output of `restic find`.
|
||||
|
||||
https://github.com/restic/restic/issues/2072
|
5
changelog/unreleased/issue-2277
Normal file
5
changelog/unreleased/issue-2277
Normal file
@@ -0,0 +1,5 @@
|
||||
Enhancement: Add support for ppc64le
|
||||
|
||||
Adds support for ppc64le, the processor architecture from IBM.
|
||||
|
||||
https://github.com/restic/restic/issues/2277
|
7
changelog/unreleased/issue-2281
Normal file
7
changelog/unreleased/issue-2281
Normal file
@@ -0,0 +1,7 @@
|
||||
Bugfix: Handle format verbs like '%' properly in `find` output
|
||||
|
||||
The JSON or "normal" output of the `find` command can now deal with file names
|
||||
that contain substrings which the Golang `fmt` package considers "format verbs"
|
||||
like `%s`.
|
||||
|
||||
https://github.com/restic/restic/issues/2281
|
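For background, a tiny self-contained Go example of the pitfall this entry refers to: when a file name is passed as the format string itself, `%`-sequences in it are treated as format verbs. The file name below is made up for illustration.

```go
package main

import "fmt"

func main() {
	name := "sale-50%s-off.txt" // a file name containing "%s"

	// Using the name as the format string mangles it:
	fmt.Printf(name + "\n") // sale-50%!s(MISSING)-off.txt

	// Passing it as an argument prints it verbatim:
	fmt.Printf("%s\n", name) // sale-50%s-off.txt
}
```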
8
changelog/unreleased/issue-2298
Normal file
8
changelog/unreleased/issue-2298
Normal file
@@ -0,0 +1,8 @@
|
||||
Bugfix: Do not hang when run as a background job
|
||||
|
||||
Restic did hang on exit while restoring the terminal configuration when it was
|
||||
started as a background job, for example using `restic ... &`. This has been
|
||||
fixed by only restoring the terminal configuration when restic is interrupted
|
||||
while reading a password from the terminal.
|
||||
|
||||
https://github.com/restic/restic/issues/2298
|
8
changelog/unreleased/issue-2389
Normal file
8
changelog/unreleased/issue-2389
Normal file
@@ -0,0 +1,8 @@
|
||||
Bugfix: Fix mangled json output of backup command
|
||||
|
||||
We've fixed a race condition in the json output of the backup command
|
||||
that could cause multiple lines to get mixed up. We've also ensured that
|
||||
the backup summary is printed last.
|
||||
|
||||
https://github.com/restic/restic/issues/2389
|
||||
https://github.com/restic/restic/pull/2545
|
5
changelog/unreleased/issue-2390
Normal file
5
changelog/unreleased/issue-2390
Normal file
@@ -0,0 +1,5 @@
|
||||
Bugfix: Refresh lock timestamp
|
||||
|
||||
Long-running operations did not refresh the lock timestamp, resulting in locks becoming stale. This is now fixed.
|
||||
|
||||
https://github.com/restic/restic/issues/2390
|
6
changelog/unreleased/issue-2429
Normal file
6
changelog/unreleased/issue-2429
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: backup --json reports total_bytes_processed as 0
|
||||
|
||||
We've fixed the json output of total_bytes_processed. The non-json output
|
||||
was already fixed with pull request #2138, but that fix left the json output untouched.
|
||||
|
||||
https://github.com/restic/restic/issues/2429
|
5
changelog/unreleased/issue-2469
Normal file
5
changelog/unreleased/issue-2469
Normal file
@@ -0,0 +1,5 @@
|
||||
Bugfix: Fix incorrect bytes stats in `diff` command
|
||||
|
||||
In some cases, the wrong number of bytes (e.g. 16777215.998 TiB) was reported by the `diff` command. This is now fixed.
|
||||
|
||||
https://github.com/restic/restic/issues/2469
|
9
changelog/unreleased/issue-2482
Normal file
9
changelog/unreleased/issue-2482
Normal file
@@ -0,0 +1,9 @@
|
||||
Change: Remove vendored dependencies
|
||||
|
||||
We've removed the vendored dependencies (in the subdir `vendor/`). When
|
||||
building restic, the Go compiler automatically fetches the dependencies. It
|
||||
will also cryptographically verify that the correct code has been fetched by
|
||||
using the hashes in `go.sum` (see the link to the documentation below).
|
||||
|
||||
https://github.com/restic/restic/issues/2482
|
||||
https://golang.org/cmd/go/#hdr-Module_downloading_and_verification
|
16
changelog/unreleased/issue-2518
Normal file
16
changelog/unreleased/issue-2518
Normal file
@@ -0,0 +1,16 @@
|
||||
Bugfix: Do not crash with Synology NAS sftp server
|
||||
|
||||
It was found that when restic is used to store data on an sftp server on a
|
||||
Synology NAS with a relative path (one which does not start with a slash), it
|
||||
may go into an endless loop trying to create directories on the server. We've
|
||||
fixed this bug by using a function in the sftp library instead of our own
|
||||
implementation.
|
||||
|
||||
The bug was discovered because the Synology sftp server behaves erratically with
|
||||
non-absolute paths (e.g. `home/restic-repo`). This can be resolved by just using
|
||||
an absolute path instead (`/home/restic-repo`). We've also added a paragraph in
|
||||
the FAQ.
|
||||
|
||||
https://github.com/restic/restic/issues/2518
|
||||
https://github.com/restic/restic/issues/2363
|
||||
https://github.com/restic/restic/pull/2530
|
5
changelog/unreleased/issue-2531
Normal file
5
changelog/unreleased/issue-2531
Normal file
@@ -0,0 +1,5 @@
|
||||
Bugfix: Fix incorrect size calculation in `stats --mode restore-size`
|
||||
|
||||
The restore-size mode of stats was counting hard-linked files as if they were independent.
|
||||
|
||||
https://github.com/restic/restic/issues/2531
|
5
changelog/unreleased/issue-2537
Normal file
5
changelog/unreleased/issue-2537
Normal file
@@ -0,0 +1,5 @@
|
||||
Bugfix: Fix incorrect file counts in `stats --mode restore-size`
|
||||
|
||||
The restore-size mode of stats was failing to count empty directories and some files with hard links.
|
||||
|
||||
https://github.com/restic/restic/issues/2537
|
17
changelog/unreleased/pull-2195
Normal file
17
changelog/unreleased/pull-2195
Normal file
@@ -0,0 +1,17 @@
|
||||
Enhancement: Simplify and improve restore performance
|
||||
|
||||
Significantly improves restore performance of large files (i.e. 50M+):
|
||||
https://github.com/restic/restic/issues/2074
|
||||
https://forum.restic.net/t/restore-using-rclone-gdrive-backend-is-slow/1112/8
|
||||
https://forum.restic.net/t/degraded-restore-performance-s3-backend/1400
|
||||
|
||||
Fixes "not enough cache capacity" error during restore:
|
||||
https://github.com/restic/restic/issues/2244
|
||||
|
||||
NOTE: This new implementation does not guarantee the order in which blobs
|
||||
are written to the target files and, for example, the last blob of a
|
||||
file can be written to the file before any of the preceding file blobs.
|
||||
It is therefore possible to have gaps in the data written to the target
|
||||
files if the restore fails or is interrupted by the user.
|
||||
|
||||
https://github.com/restic/restic/pull/2195
|
5
changelog/unreleased/pull-2423
Normal file
5
changelog/unreleased/pull-2423
Normal file
@@ -0,0 +1,5 @@
|
||||
Enhancement: support user@domain parsing as user
|
||||
|
||||
Added the ability for user@domain-like users to be authenticated against SFTP servers.
|
||||
|
||||
https://github.com/restic/restic/pull/2423
|
7
changelog/unreleased/pull-2576
Normal file
7
changelog/unreleased/pull-2576
Normal file
@@ -0,0 +1,7 @@
|
||||
Enhancement: Improve the chunking algorithm
|
||||
|
||||
We've updated the chunker library responsible for splitting files into smaller
|
||||
blocks. It should improve the chunking throughput by 5-10% depending on the
|
||||
CPU.
|
||||
|
||||
https://github.com/restic/restic/pull/2576
|
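As a usage sketch for the updated chunker library (mirroring the `ExampleChunker` function that appears later in this diff), splitting a stream into content-defined chunks looks roughly like this; the input data and buffer sizes are arbitrary.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/restic/chunker"
)

func main() {
	data := bytes.Repeat([]byte("0123456789abcdef"), 1<<20) // 16 MiB of sample data

	// The polynomial is the test polynomial used in the chunker tests below.
	chnker := chunker.New(bytes.NewReader(data), chunker.Pol(0x3DA3358B4DC173))

	buf := make([]byte, 8*1024*1024) // reused chunk buffer
	for {
		c, err := chnker.Next(buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk start=%d length=%d\n", c.Start, c.Length)
	}
}
```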
6
changelog/unreleased/pull-2592
Normal file
6
changelog/unreleased/pull-2592
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: SFTP backend supports IPv6 addresses
|
||||
|
||||
The SFTP backend now supports IPv6 addresses natively, without relying on
|
||||
aliases in the external SSH configuration.
|
||||
|
||||
https://github.com/restic/restic/pull/2592
|
6
changelog/unreleased/pull-2600
Normal file
6
changelog/unreleased/pull-2600
Normal file
@@ -0,0 +1,6 @@
|
||||
Change: Require Go >= 1.11
|
||||
|
||||
Restic now requires Go to be at least 1.11. This allows simplifications in the
|
||||
build process and the removal of workarounds.
|
||||
|
||||
https://github.com/restic/restic/pull/2600
|
7
changelog/unreleased/pull-2607
Normal file
7
changelog/unreleased/pull-2607
Normal file
@@ -0,0 +1,7 @@
|
||||
Bugfix: Honor RESTIC_CACHE_DIR environment variable on Mac and Windows
|
||||
|
||||
On Mac and Windows, the RESTIC_CACHE_DIR environment variable was ignored.
|
||||
This variable can now be used on all platforms to set the directory where
|
||||
restic stores caches.
|
||||
|
||||
https://github.com/restic/restic/pull/2607
|
6
changelog/unreleased/pull-2668
Normal file
6
changelog/unreleased/pull-2668
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Don't abort the stats command when data blobs are missing
|
||||
|
||||
Running the stats command in the blobs-per-file mode on a repository with
|
||||
missing data blobs previously resulted in a crash.
|
||||
|
||||
https://github.com/restic/restic/pull/2668
|
@@ -1,18 +1,20 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.6.4
|
||||
- 1.7.4
|
||||
- tip
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
go: "1.9.x"
|
||||
- os: linux
|
||||
go: "1.10.x"
|
||||
- os: linux
|
||||
go: "tip"
|
||||
- os: osx
|
||||
go: "1.10.x"
|
||||
|
||||
install:
|
||||
- go get -t ./...
|
||||
- go get -u github.com/golang/lint/golint
|
||||
- go get -u golang.org/x/lint/golint
|
||||
- go get -u golang.org/x/tools/cmd/goimports
|
||||
|
||||
script:
|
@@ -1,5 +1,5 @@
|
||||
[](http://godoc.org/github.com/restic/chunker)
|
||||
[](https://travis-ci.org/restic/chunker)
|
||||
[](https://travis-ci.com/restic/chunker)
|
||||
|
||||
The package `chunker` implements content-defined-chunking (CDC) based on a
|
||||
rolling Rabin Hash. The library is part of the [restic backup
|
@@ -2,6 +2,7 @@ package chunker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
@@ -47,7 +48,7 @@ type Chunk struct {
|
||||
|
||||
type chunkerState struct {
|
||||
window [windowSize]byte
|
||||
wpos int
|
||||
wpos uint
|
||||
|
||||
buf []byte
|
||||
bpos uint
|
||||
@@ -225,6 +226,12 @@ func (c *Chunker) Next(data []byte) (Chunk, error) {
|
||||
tabout := c.tables.out
|
||||
tabmod := c.tables.mod
|
||||
polShift := c.polShift
|
||||
// go guarantees the expected behavior for bit shifts even for shift counts
|
||||
// larger than the value width. Bounding the value of polShift allows the compiler
|
||||
// to optimize the code for 'digest >> polShift'
|
||||
if polShift > 53-8 {
|
||||
return Chunk{}, errors.New("the polynomial must have a degree less than or equal 53")
|
||||
}
|
||||
minSize := c.MinSize
|
||||
maxSize := c.MaxSize
|
||||
buf := c.buf
|
||||
@@ -259,6 +266,10 @@ func (c *Chunker) Next(data []byte) (Chunk, error) {
|
||||
return Chunk{}, err
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
return Chunk{}, fmt.Errorf("ReadFull returned negative number of bytes read: %v", n)
|
||||
}
|
||||
|
||||
c.bpos = 0
|
||||
c.bmax = uint(n)
|
||||
}
|
||||
@@ -291,10 +302,12 @@ func (c *Chunker) Next(data []byte) (Chunk, error) {
|
||||
wpos := c.wpos
|
||||
for _, b := range buf[c.bpos:c.bmax] {
|
||||
// slide(b)
|
||||
// limit wpos beforehand to elide array bounds checks
|
||||
wpos = wpos % windowSize
|
||||
out := win[wpos]
|
||||
win[wpos] = b
|
||||
digest ^= uint64(tabout[out])
|
||||
wpos = (wpos + 1) % windowSize
|
||||
wpos++
|
||||
|
||||
// updateDigest
|
||||
index := byte(digest >> polShift)
|
||||
@@ -331,7 +344,7 @@ func (c *Chunker) Next(data []byte) (Chunk, error) {
|
||||
}
|
||||
c.digest = digest
|
||||
c.window = win
|
||||
c.wpos = wpos
|
||||
c.wpos = wpos % windowSize
|
||||
|
||||
steps := c.bmax - c.bpos
|
||||
if steps > 0 {
|
347
chunker/chunker_test.go
Normal file
347
chunker/chunker_test.go
Normal file
@@ -0,0 +1,347 @@
|
||||
package chunker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func parseDigest(s string) []byte {
|
||||
d, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
type chunk struct {
|
||||
Length uint
|
||||
CutFP uint64
|
||||
Digest []byte
|
||||
}
|
||||
|
||||
// polynomial used for all the tests below
|
||||
const testPol = Pol(0x3DA3358B4DC173)
|
||||
|
||||
// created for 32MB of random data out of math/rand's Uint32() seeded by
|
||||
// constant 23
|
||||
//
|
||||
// chunking configuration:
|
||||
// window size 64, avg chunksize 1<<20, min chunksize 1<<19, max chunksize 1<<23
|
||||
// polynom 0x3DA3358B4DC173
|
||||
var chunks1 = []chunk{
|
||||
chunk{2163460, 0x000b98d4cdf00000, parseDigest("4b94cb2cf293855ea43bf766731c74969b91aa6bf3c078719aabdd19860d590d")},
|
||||
chunk{643703, 0x000d4e8364d00000, parseDigest("5727a63c0964f365ab8ed2ccf604912f2ea7be29759a2b53ede4d6841e397407")},
|
||||
chunk{1528956, 0x0015a25c2ef00000, parseDigest("a73759636a1e7a2758767791c69e81b69fb49236c6929e5d1b654e06e37674ba")},
|
||||
chunk{1955808, 0x00102a8242e00000, parseDigest("c955fb059409b25f07e5ae09defbbc2aadf117c97a3724e06ad4abd2787e6824")},
|
||||
chunk{2222372, 0x00045da878000000, parseDigest("6ba5e9f7e1b310722be3627716cf469be941f7f3e39a4c3bcefea492ec31ee56")},
|
||||
chunk{2538687, 0x00198a8179900000, parseDigest("8687937412f654b5cfe4a82b08f28393a0c040f77c6f95e26742c2fc4254bfde")},
|
||||
chunk{609606, 0x001d4e8d17100000, parseDigest("5da820742ff5feb3369112938d3095785487456f65a8efc4b96dac4be7ebb259")},
|
||||
chunk{1205738, 0x000a7204dd600000, parseDigest("cc70d8fad5472beb031b1aca356bcab86c7368f40faa24fe5f8922c6c268c299")},
|
||||
chunk{959742, 0x00183e71e1400000, parseDigest("4065bdd778f95676c92b38ac265d361f81bff17d76e5d9452cf985a2ea5a4e39")},
|
||||
chunk{4036109, 0x001fec043c700000, parseDigest("b9cf166e75200eb4993fc9b6e22300a6790c75e6b0fc8f3f29b68a752d42f275")},
|
||||
chunk{1525894, 0x000b1574b1500000, parseDigest("2f238180e4ca1f7520a05f3d6059233926341090f9236ce677690c1823eccab3")},
|
||||
chunk{1352720, 0x00018965f2e00000, parseDigest("afd12f13286a3901430de816e62b85cc62468c059295ce5888b76b3af9028d84")},
|
||||
chunk{811884, 0x00155628aa100000, parseDigest("42d0cdb1ee7c48e552705d18e061abb70ae7957027db8ae8db37ec756472a70a")},
|
||||
chunk{1282314, 0x001909a0a1400000, parseDigest("819721c2457426eb4f4c7565050c44c32076a56fa9b4515a1c7796441730eb58")},
|
||||
chunk{1318021, 0x001cceb980000000, parseDigest("842eb53543db55bacac5e25cb91e43cc2e310fe5f9acc1aee86bdf5e91389374")},
|
||||
chunk{948640, 0x0011f7a470a00000, parseDigest("b8e36bf7019bb96ac3fb7867659d2167d9d3b3148c09fe0de45850b8fe577185")},
|
||||
chunk{645464, 0x00030ce2d9400000, parseDigest("5584bd27982191c3329f01ed846bfd266e96548dfa87018f745c33cfc240211d")},
|
||||
chunk{533758, 0x0004435c53c00000, parseDigest("4da778a25b72a9a0d53529eccfe2e5865a789116cb1800f470d8df685a8ab05d")},
|
||||
chunk{1128303, 0x0000c48517800000, parseDigest("08c6b0b38095b348d80300f0be4c5184d2744a17147c2cba5cc4315abf4c048f")},
|
||||
chunk{800374, 0x000968473f900000, parseDigest("820284d2c8fd243429674c996d8eb8d3450cbc32421f43113e980f516282c7bf")},
|
||||
chunk{2453512, 0x001e197c92600000, parseDigest("5fa870ed107c67704258e5e50abe67509fb73562caf77caa843b5f243425d853")},
|
||||
chunk{2651975, 0x000ae6c868000000, parseDigest("181347d2bbec32bef77ad5e9001e6af80f6abcf3576549384d334ee00c1988d8")},
|
||||
chunk{237392, 0x0000000000000001, parseDigest("fcd567f5d866357a8e299fd5b2359bb2c8157c30395229c4e9b0a353944a7978")},
|
||||
}
|
||||
|
||||
// test if nullbytes are correctly split, even if length is a multiple of MinSize.
|
||||
var chunks2 = []chunk{
|
||||
chunk{MinSize, 0, parseDigest("07854d2fef297a06ba81685e660c332de36d5d18d546927d30daad6d7fda1541")},
|
||||
chunk{MinSize, 0, parseDigest("07854d2fef297a06ba81685e660c332de36d5d18d546927d30daad6d7fda1541")},
|
||||
chunk{MinSize, 0, parseDigest("07854d2fef297a06ba81685e660c332de36d5d18d546927d30daad6d7fda1541")},
|
||||
chunk{MinSize, 0, parseDigest("07854d2fef297a06ba81685e660c332de36d5d18d546927d30daad6d7fda1541")},
|
||||
}
|
||||
|
||||
// the same as chunks1, but avg chunksize is 1<<19
|
||||
var chunks3 = []chunk{
|
||||
chunk{1491586, 0x00023e586ea80000, parseDigest("4c008237df602048039287427171cef568a6cb965d1b5ca28dc80504a24bb061")},
|
||||
chunk{671874, 0x000b98d4cdf00000, parseDigest("fa8a42321b90c3d4ce9dd850562b2fd0c0fe4bdd26cf01a24f22046a224225d3")},
|
||||
chunk{643703, 0x000d4e8364d00000, parseDigest("5727a63c0964f365ab8ed2ccf604912f2ea7be29759a2b53ede4d6841e397407")},
|
||||
chunk{1284146, 0x0012b527e4780000, parseDigest("16d04cafecbeae9eaedd49da14c7ad7cdc2b1cc8569e5c16c32c9fb045aa899a")},
|
||||
chunk{823366, 0x000d1d6752180000, parseDigest("48662c118514817825ad4761e8e2e5f28f9bd8281b07e95dcafc6d02e0aa45c3")},
|
||||
chunk{810134, 0x0016071b6e180000, parseDigest("f629581aa05562f97f2c359890734c8574c5575da32f9289c5ba70bfd05f3f46")},
|
||||
chunk{567118, 0x00102a8242e00000, parseDigest("d4f0797c56c60d01bac33bfd49957a4816b6c067fc155b026de8a214cab4d70a")},
|
||||
chunk{821315, 0x001b3e42c8180000, parseDigest("8ebd0fd5db0293bd19140da936eb8b1bbd3cd6ffbec487385b956790014751ca")},
|
||||
chunk{1401057, 0x00045da878000000, parseDigest("001360af59adf4871ef138cfa2bb49007e86edaf5ac2d6f0b3d3014510991848")},
|
||||
chunk{2311122, 0x0005cbd885380000, parseDigest("8276d489b566086d9da95dc5c5fe6fc7d72646dd3308ced6b5b6ddb8595f0aa1")},
|
||||
chunk{608723, 0x001cfcd86f280000, parseDigest("518db33ba6a79d4f3720946f3785c05b9611082586d47ea58390fc2f6de9449e")},
|
||||
chunk{980456, 0x0013edb7a7f80000, parseDigest("0121b1690738395e15fecba1410cd0bf13fde02225160cad148829f77e7b6c99")},
|
||||
chunk{1140278, 0x0001f9f017e80000, parseDigest("28ca7c74804b5075d4f5eeb11f0845d99f62e8ea3a42b9a05c7bd5f2fca619dd")},
|
||||
chunk{2015542, 0x00097bf5d8180000, parseDigest("6fe8291f427d48650a5f0f944305d3a2dbc649bd401d2655fc0bdd42e890ca5a")},
|
||||
chunk{904752, 0x000e1863eff80000, parseDigest("62af1f1eb3f588d18aff28473303cc4731fc3cafcc52ce818fee3c4c2820854d")},
|
||||
chunk{713072, 0x001f3bb1b9b80000, parseDigest("4bda9dc2e3031d004d87a5cc93fe5207c4b0843186481b8f31597dc6ffa1496c")},
|
||||
chunk{675937, 0x001fec043c700000, parseDigest("5299c8c5acec1b90bb020cd75718aab5e12abb9bf66291465fd10e6a823a8b4a")},
|
||||
chunk{1525894, 0x000b1574b1500000, parseDigest("2f238180e4ca1f7520a05f3d6059233926341090f9236ce677690c1823eccab3")},
|
||||
chunk{1352720, 0x00018965f2e00000, parseDigest("afd12f13286a3901430de816e62b85cc62468c059295ce5888b76b3af9028d84")},
|
||||
chunk{811884, 0x00155628aa100000, parseDigest("42d0cdb1ee7c48e552705d18e061abb70ae7957027db8ae8db37ec756472a70a")},
|
||||
chunk{1282314, 0x001909a0a1400000, parseDigest("819721c2457426eb4f4c7565050c44c32076a56fa9b4515a1c7796441730eb58")},
|
||||
chunk{1093738, 0x0017f5d048880000, parseDigest("5dddfa7a241b68f65d267744bdb082ee865f3c2f0d8b946ea0ee47868a01bbff")},
|
||||
chunk{962003, 0x000b921f7ef80000, parseDigest("0cb5c9ebba196b441c715c8d805f6e7143a81cd5b0d2c65c6aacf59ca9124af9")},
|
||||
chunk{856384, 0x00030ce2d9400000, parseDigest("7734b206d46f3f387e8661e81edf5b1a91ea681867beb5831c18aaa86632d7fb")},
|
||||
chunk{533758, 0x0004435c53c00000, parseDigest("4da778a25b72a9a0d53529eccfe2e5865a789116cb1800f470d8df685a8ab05d")},
|
||||
chunk{1128303, 0x0000c48517800000, parseDigest("08c6b0b38095b348d80300f0be4c5184d2744a17147c2cba5cc4315abf4c048f")},
|
||||
chunk{800374, 0x000968473f900000, parseDigest("820284d2c8fd243429674c996d8eb8d3450cbc32421f43113e980f516282c7bf")},
|
||||
chunk{2453512, 0x001e197c92600000, parseDigest("5fa870ed107c67704258e5e50abe67509fb73562caf77caa843b5f243425d853")},
|
||||
chunk{665901, 0x00118c842cb80000, parseDigest("deceec26163842fdef6560311c69bf8a9871a56e16d719e2c4b7e4d668ceb61f")},
|
||||
chunk{1986074, 0x000ae6c868000000, parseDigest("64cd64bf3c3bc389eb20df8310f0427d1c36ab2eaaf09e346bfa7f0453fc1a18")},
|
||||
chunk{237392, 0x0000000000000001, parseDigest("fcd567f5d866357a8e299fd5b2359bb2c8157c30395229c4e9b0a353944a7978")},
|
||||
}
|
||||
|
||||
func testWithData(t *testing.T, chnker *Chunker, testChunks []chunk, checkDigest bool) []Chunk {
|
||||
chunks := []Chunk{}
|
||||
|
||||
pos := uint(0)
|
||||
for i, chunk := range testChunks {
|
||||
c, err := chnker.Next(nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error returned with chunk %d: %v", i, err)
|
||||
}
|
||||
|
||||
if c.Start != pos {
|
||||
t.Fatalf("Start for chunk %d does not match: expected %d, got %d",
|
||||
i, pos, c.Start)
|
||||
}
|
||||
|
||||
if c.Length != chunk.Length {
|
||||
t.Fatalf("Length for chunk %d does not match: expected %d, got %d",
|
||||
i, chunk.Length, c.Length)
|
||||
}
|
||||
|
||||
if c.Cut != chunk.CutFP {
|
||||
t.Fatalf("Cut fingerprint for chunk %d/%d does not match: expected %016x, got %016x",
|
||||
i, len(chunks)-1, chunk.CutFP, c.Cut)
|
||||
}
|
||||
|
||||
if checkDigest {
|
||||
digest := hashData(c.Data)
|
||||
if !bytes.Equal(chunk.Digest, digest) {
|
||||
t.Fatalf("Digest fingerprint for chunk %d/%d does not match: expected %02x, got %02x",
|
||||
i, len(chunks)-1, chunk.Digest, digest)
|
||||
}
|
||||
}
|
||||
|
||||
pos += c.Length
|
||||
chunks = append(chunks, c)
|
||||
}
|
||||
|
||||
_, err := chnker.Next(nil)
|
||||
if err != io.EOF {
|
||||
t.Fatal("Wrong error returned after last chunk")
|
||||
}
|
||||
|
||||
if len(chunks) != len(testChunks) {
|
||||
t.Fatal("Amounts of test and resulting chunks do not match")
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
|
||||
|
||||
func getRandom(seed int64, count int) []byte {
|
||||
buf := make([]byte, count)
|
||||
|
||||
rnd := rand.New(rand.NewSource(seed))
|
||||
for i := 0; i < count; i += 4 {
|
||||
r := rnd.Uint32()
|
||||
buf[i] = byte(r)
|
||||
buf[i+1] = byte(r >> 8)
|
||||
buf[i+2] = byte(r >> 16)
|
||||
buf[i+3] = byte(r >> 24)
|
||||
}
|
||||
|
||||
return buf
|
||||
}
|
||||
|
||||
func hashData(d []byte) []byte {
|
||||
h := sha256.New()
|
||||
h.Write(d)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
func TestChunker(t *testing.T) {
|
||||
// setup data source
|
||||
buf := getRandom(23, 32*1024*1024)
|
||||
ch := New(bytes.NewReader(buf), testPol)
|
||||
testWithData(t, ch, chunks1, true)
|
||||
|
||||
// setup nullbyte data source
|
||||
buf = bytes.Repeat([]byte{0}, len(chunks2)*MinSize)
|
||||
ch = New(bytes.NewReader(buf), testPol)
|
||||
|
||||
testWithData(t, ch, chunks2, true)
|
||||
}
|
||||
|
||||
func TestChunkerWithCustomAverageBits(t *testing.T) {
|
||||
buf := getRandom(23, 32*1024*1024)
|
||||
ch := New(bytes.NewReader(buf), testPol)
|
||||
|
||||
// slightly decrease averageBits to get more chunks
|
||||
ch.SetAverageBits(19)
|
||||
testWithData(t, ch, chunks3, true)
|
||||
}
|
||||
|
||||
func TestChunkerReset(t *testing.T) {
|
||||
buf := getRandom(23, 32*1024*1024)
|
||||
ch := New(bytes.NewReader(buf), testPol)
|
||||
testWithData(t, ch, chunks1, true)
|
||||
|
||||
ch.Reset(bytes.NewReader(buf), testPol)
|
||||
testWithData(t, ch, chunks1, true)
|
||||
}
|
||||
|
||||
func TestChunkerWithRandomPolynomial(t *testing.T) {
|
||||
// setup data source
|
||||
buf := getRandom(23, 32*1024*1024)
|
||||
|
||||
// generate a new random polynomial
|
||||
start := time.Now()
|
||||
p, err := RandomPolynomial()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("generating random polynomial took %v", time.Since(start))
|
||||
|
||||
start = time.Now()
|
||||
ch := New(bytes.NewReader(buf), p)
|
||||
t.Logf("creating chunker took %v", time.Since(start))
|
||||
|
||||
// make sure that first chunk is different
|
||||
c, err := ch.Next(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
if c.Cut == chunks1[0].CutFP {
|
||||
t.Fatal("Cut point is the same")
|
||||
}
|
||||
|
||||
if c.Length == chunks1[0].Length {
|
||||
t.Fatal("Length is the same")
|
||||
}
|
||||
|
||||
if bytes.Equal(hashData(c.Data), chunks1[0].Digest) {
|
||||
t.Fatal("Digest is the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChunkerWithoutHash(t *testing.T) {
|
||||
// setup data source
|
||||
buf := getRandom(23, 32*1024*1024)
|
||||
|
||||
ch := New(bytes.NewReader(buf), testPol)
|
||||
chunks := testWithData(t, ch, chunks1, false)
|
||||
|
||||
// test reader
|
||||
for i, c := range chunks {
|
||||
if uint(len(c.Data)) != chunks1[i].Length {
|
||||
t.Fatalf("reader returned wrong number of bytes: expected %d, got %d",
|
||||
chunks1[i].Length, len(c.Data))
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf[c.Start:c.Start+c.Length], c.Data) {
|
||||
t.Fatalf("invalid data for chunk returned: expected %02x, got %02x",
|
||||
buf[c.Start:c.Start+c.Length], c.Data)
|
||||
}
|
||||
}
|
||||
|
||||
// setup nullbyte data source
|
||||
buf = bytes.Repeat([]byte{0}, len(chunks2)*MinSize)
|
||||
ch = New(bytes.NewReader(buf), testPol)
|
||||
|
||||
testWithData(t, ch, chunks2, false)
|
||||
}
|
||||
|
||||
func benchmarkChunker(b *testing.B, checkDigest bool) {
|
||||
size := 32 * 1024 * 1024
|
||||
rd := bytes.NewReader(getRandom(23, size))
|
||||
ch := New(rd, testPol)
|
||||
buf := make([]byte, MaxSize)
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(int64(size))
|
||||
|
||||
var chunks int
|
||||
for i := 0; i < b.N; i++ {
|
||||
chunks = 0
|
||||
|
||||
_, err := rd.Seek(0, 0)
|
||||
if err != nil {
|
||||
b.Fatalf("Seek() return error %v", err)
|
||||
}
|
||||
|
||||
ch.Reset(rd, testPol)
|
||||
|
||||
cur := 0
|
||||
for {
|
||||
chunk, err := ch.Next(buf)
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
b.Fatalf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
|
||||
if chunk.Length != chunks1[cur].Length {
|
||||
b.Errorf("wrong chunk length, want %d, got %d",
|
||||
chunks1[cur].Length, chunk.Length)
|
||||
}
|
||||
|
||||
if chunk.Cut != chunks1[cur].CutFP {
|
||||
b.Errorf("wrong cut fingerprint, want 0x%x, got 0x%x",
|
||||
chunks1[cur].CutFP, chunk.Cut)
|
||||
}
|
||||
|
||||
if checkDigest {
|
||||
h := hashData(chunk.Data)
|
||||
if !bytes.Equal(h, chunks1[cur].Digest) {
|
||||
b.Errorf("wrong digest, want %x, got %x",
|
||||
chunks1[cur].Digest, h)
|
||||
}
|
||||
}
|
||||
|
||||
chunks++
|
||||
cur++
|
||||
}
|
||||
}
|
||||
|
||||
b.Logf("%d chunks, average chunk size: %d bytes", chunks, size/chunks)
|
||||
}
|
||||
|
||||
func BenchmarkChunkerWithSHA256(b *testing.B) {
|
||||
benchmarkChunker(b, true)
|
||||
}
|
||||
|
||||
func BenchmarkChunker(b *testing.B) {
|
||||
benchmarkChunker(b, false)
|
||||
}
|
||||
|
||||
func BenchmarkNewChunker(b *testing.B) {
|
||||
p, err := RandomPolynomial()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
New(bytes.NewBuffer(nil), p)
|
||||
}
|
||||
}
|
39
chunker/example_test.go
Normal file
39
chunker/example_test.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package chunker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func ExampleChunker() {
|
||||
// generate 32MiB of deterministic pseudo-random data
|
||||
data := getRandom(23, 32*1024*1024)
|
||||
|
||||
// create a chunker
|
||||
chunker := New(bytes.NewReader(data), Pol(0x3DA3358B4DC173))
|
||||
|
||||
// reuse this buffer
|
||||
buf := make([]byte, 8*1024*1024)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
chunk, err := chunker.Next(buf)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%d %02x\n", chunk.Length, sha256.Sum256(chunk.Data))
|
||||
}
|
||||
|
||||
// Output:
|
||||
// 2163460 4b94cb2cf293855ea43bf766731c74969b91aa6bf3c078719aabdd19860d590d
|
||||
// 643703 5727a63c0964f365ab8ed2ccf604912f2ea7be29759a2b53ede4d6841e397407
|
||||
// 1528956 a73759636a1e7a2758767791c69e81b69fb49236c6929e5d1b654e06e37674ba
|
||||
// 1955808 c955fb059409b25f07e5ae09defbbc2aadf117c97a3724e06ad4abd2787e6824
|
||||
// 2222372 6ba5e9f7e1b310722be3627716cf469be941f7f3e39a4c3bcefea492ec31ee56
|
||||
}
|
3
chunker/go.mod
Normal file
3
chunker/go.mod
Normal file
@@ -0,0 +1,3 @@
|
||||
module github.com/restic/chunker
|
||||
|
||||
go 1.14
|
@@ -94,7 +94,6 @@ func (x Pol) Deg() int {
|
||||
}
|
||||
|
||||
if uint64(x)&0x2 > 0 {
|
||||
x >>= 1
|
||||
r |= 1
|
||||
}
|
||||
|
424
chunker/polynomials_test.go
Normal file
424
chunker/polynomials_test.go
Normal file
@@ -0,0 +1,424 @@
|
||||
package chunker
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var polAddTests = []struct {
|
||||
x, y Pol
|
||||
sum Pol
|
||||
}{
|
||||
{23, 16, 23 ^ 16},
|
||||
{0x9a7e30d1e855e0a0, 0x670102a1f4bcd414, 0xfd7f32701ce934b4},
|
||||
{0x9a7e30d1e855e0a0, 0x9a7e30d1e855e0a0, 0},
|
||||
}
|
||||
|
||||
func TestPolAdd(t *testing.T) {
|
||||
for i, test := range polAddTests {
|
||||
if test.sum != test.x.Add(test.y) {
|
||||
t.Errorf("test %d failed: sum != x+y", i)
|
||||
}
|
||||
|
||||
if test.sum != test.y.Add(test.x) {
|
||||
t.Errorf("test %d failed: sum != y+x", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseBin(s string) Pol {
|
||||
i, err := strconv.ParseUint(s, 2, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return Pol(i)
|
||||
}
|
||||
|
||||
var polMulTests = []struct {
|
||||
x, y Pol
|
||||
res Pol
|
||||
}{
|
||||
{1, 2, 2},
|
||||
{
|
||||
parseBin("1101"),
|
||||
parseBin("10"),
|
||||
parseBin("11010"),
|
||||
},
|
||||
{
|
||||
parseBin("1101"),
|
||||
parseBin("11"),
|
||||
parseBin("10111"),
|
||||
},
|
||||
{
|
||||
0x40000000,
|
||||
0x40000000,
|
||||
0x1000000000000000,
|
||||
},
|
||||
{
|
||||
parseBin("1010"),
|
||||
parseBin("100100"),
|
||||
parseBin("101101000"),
|
||||
},
|
||||
{
|
||||
parseBin("100"),
|
||||
parseBin("11"),
|
||||
parseBin("1100"),
|
||||
},
|
||||
{
|
||||
parseBin("11"),
|
||||
parseBin("110101"),
|
||||
parseBin("1011111"),
|
||||
},
|
||||
{
|
||||
parseBin("10011"),
|
||||
parseBin("110101"),
|
||||
parseBin("1100001111"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestPolMul(t *testing.T) {
|
||||
for i, test := range polMulTests {
|
||||
m := test.x.Mul(test.y)
|
||||
if test.res != m {
|
||||
t.Errorf("TestPolMul failed for test %d: %v * %v: want %v, got %v",
|
||||
i, test.x, test.y, test.res, m)
|
||||
}
|
||||
m = test.y.Mul(test.x)
|
||||
if test.res != test.y.Mul(test.x) {
|
||||
t.Errorf("TestPolMul failed for %d: %v * %v: want %v, got %v",
|
||||
i, test.x, test.y, test.res, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPolMulOverflow(t *testing.T) {
|
||||
defer func() {
|
||||
// try to recover overflow error
|
||||
err := recover()
|
||||
|
||||
if e, ok := err.(string); ok && e == "multiplication would overflow uint64" {
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("invalid error raised: %v", err)
|
||||
// re-raise error if not overflow
|
||||
panic(err)
|
||||
}()
|
||||
|
||||
x := Pol(1 << 63)
|
||||
x.Mul(2)
|
||||
t.Fatal("overflow test did not panic")
|
||||
}
|
||||
|
||||
var polDivTests = []struct {
|
||||
x, y Pol
|
||||
res Pol
|
||||
}{
|
||||
{10, 50, 0},
|
||||
{0, 1, 0},
|
||||
{
|
||||
parseBin("101101000"), // 0x168
|
||||
parseBin("1010"), // 0xa
|
||||
parseBin("100100"), // 0x24
|
||||
},
|
||||
{2, 2, 1},
|
||||
{
|
||||
0x8000000000000000,
|
||||
0x8000000000000000,
|
||||
1,
|
||||
},
|
||||
{
|
||||
parseBin("1100"),
|
||||
parseBin("100"),
|
||||
parseBin("11"),
|
||||
},
|
||||
{
|
||||
parseBin("1100001111"),
|
||||
parseBin("10011"),
|
||||
parseBin("110101"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestPolDiv(t *testing.T) {
|
||||
for i, test := range polDivTests {
|
||||
m := test.x.Div(test.y)
|
||||
if test.res != m {
|
||||
t.Errorf("TestPolDiv failed for test %d: %v * %v: want %v, got %v",
|
||||
i, test.x, test.y, test.res, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPolDeg(t *testing.T) {
|
||||
var x Pol
|
||||
if x.Deg() != -1 {
|
||||
t.Errorf("deg(0) is not -1: %v", x.Deg())
|
||||
}
|
||||
|
||||
x = 1
|
||||
if x.Deg() != 0 {
|
||||
t.Errorf("deg(1) is not 0: %v", x.Deg())
|
||||
}
|
||||
|
||||
for i := 0; i < 64; i++ {
|
||||
x = 1 << uint(i)
|
||||
if x.Deg() != i {
|
||||
t.Errorf("deg(1<<%d) is not %d: %v", i, i, x.Deg())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var polModTests = []struct {
|
||||
x, y Pol
|
||||
res Pol
|
||||
}{
|
||||
{10, 50, 10},
|
||||
{0, 1, 0},
|
||||
{
|
||||
parseBin("101101001"),
|
||||
parseBin("1010"),
|
||||
parseBin("1"),
|
||||
},
|
||||
{2, 2, 0},
|
||||
{
|
||||
0x8000000000000000,
|
||||
0x8000000000000000,
|
||||
0,
|
||||
},
|
||||
{
|
||||
parseBin("1100"),
|
||||
parseBin("100"),
|
||||
parseBin("0"),
|
||||
},
|
||||
{
|
||||
parseBin("1100001111"),
|
||||
parseBin("10011"),
|
||||
parseBin("0"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestPolModt(t *testing.T) {
|
||||
for i, test := range polModTests {
|
||||
res := test.x.Mod(test.y)
|
||||
if test.res != res {
|
||||
t.Errorf("test %d failed: want %v, got %v", i, test.res, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolDivMod(t *testing.B) {
|
||||
f := Pol(0x2482734cacca49)
|
||||
g := Pol(0x3af4b284899)
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
g.DivMod(f)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolDiv(t *testing.B) {
|
||||
f := Pol(0x2482734cacca49)
|
||||
g := Pol(0x3af4b284899)
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
g.Div(f)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolMod(t *testing.B) {
|
||||
f := Pol(0x2482734cacca49)
|
||||
g := Pol(0x3af4b284899)
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
g.Mod(f)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolDeg(t *testing.B) {
|
||||
f := Pol(0x3af4b284899)
|
||||
d := f.Deg()
|
||||
if d != 41 {
|
||||
t.Fatalf("BenchmalPolDeg: Wrong degree %d returned, expected %d",
|
||||
d, 41)
|
||||
}
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
f.Deg()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomPolynomial(t *testing.T) {
|
||||
_, err := RandomPolynomial()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRandomPolynomial(t *testing.B) {
|
||||
for i := 0; i < t.N; i++ {
|
||||
_, err := RandomPolynomial()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpandPolynomial(t *testing.T) {
|
||||
pol := Pol(0x3DA3358B4DC173)
|
||||
s := pol.Expand()
|
||||
if s != "x^53+x^52+x^51+x^50+x^48+x^47+x^45+x^41+x^40+x^37+x^36+x^34+x^32+x^31+x^27+x^25+x^24+x^22+x^19+x^18+x^16+x^15+x^14+x^8+x^6+x^5+x^4+x+1" {
|
||||
t.Fatal("wrong result")
|
||||
}
|
||||
}
|
||||
|
||||
var polIrredTests = []struct {
|
||||
f Pol
|
||||
irred bool
|
||||
}{
|
||||
{0x38f1e565e288df, false},
|
||||
{0x3DA3358B4DC173, true},
|
||||
{0x30a8295b9d5c91, false},
|
||||
{0x255f4350b962cb, false},
|
||||
{0x267f776110a235, false},
|
||||
{0x2f4dae10d41227, false},
|
||||
{0x2482734cacca49, true},
|
||||
{0x312daf4b284899, false},
|
||||
{0x29dfb6553d01d1, false},
|
||||
{0x3548245eb26257, false},
|
||||
{0x3199e7ef4211b3, false},
|
||||
{0x362f39017dae8b, false},
|
||||
{0x200d57aa6fdacb, false},
|
||||
{0x35e0a4efa1d275, false},
|
||||
{0x2ced55b026577f, false},
|
||||
{0x260b012010893d, false},
|
||||
{0x2df29cbcd59e9d, false},
|
||||
{0x3f2ac7488bd429, false},
|
||||
{0x3e5cb1711669fb, false},
|
||||
{0x226d8de57a9959, false},
|
||||
{0x3c8de80aaf5835, false},
|
||||
{0x2026a59efb219b, false},
|
||||
{0x39dfa4d13fb231, false},
|
||||
{0x3143d0464b3299, false},
|
||||
}
|
||||
|
||||
func TestPolIrreducible(t *testing.T) {
|
||||
for _, test := range polIrredTests {
|
||||
if test.f.Irreducible() != test.irred {
|
||||
t.Errorf("Irreducibility test for Polynomial %v failed: got %v, wanted %v",
|
||||
test.f, test.f.Irreducible(), test.irred)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolIrreducible(b *testing.B) {
|
||||
// find first irreducible polynomial
|
||||
var pol Pol
|
||||
for _, test := range polIrredTests {
|
||||
if test.irred {
|
||||
pol = test.f
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if !pol.Irreducible() {
|
||||
b.Errorf("Irreducibility test for Polynomial %v failed", pol)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var polGCDTests = []struct {
|
||||
f1 Pol
|
||||
f2 Pol
|
||||
gcd Pol
|
||||
}{
|
||||
{10, 50, 2},
|
||||
{0, 1, 1},
|
||||
{
|
||||
parseBin("101101001"),
|
||||
parseBin("1010"),
|
||||
parseBin("1"),
|
||||
},
|
||||
{2, 2, 2},
|
||||
{
|
||||
parseBin("1010"),
|
||||
parseBin("11"),
|
||||
parseBin("11"),
|
||||
},
|
||||
{
|
||||
0x8000000000000000,
|
||||
0x8000000000000000,
|
||||
0x8000000000000000,
|
||||
},
|
||||
{
|
||||
parseBin("1100"),
|
||||
parseBin("101"),
|
||||
parseBin("11"),
|
||||
},
|
||||
{
|
||||
parseBin("1100001111"),
|
||||
parseBin("10011"),
|
||||
parseBin("10011"),
|
||||
},
|
||||
{
|
||||
0x3DA3358B4DC173,
|
||||
0x3DA3358B4DC173,
|
||||
0x3DA3358B4DC173,
|
||||
},
|
||||
{
|
||||
0x3DA3358B4DC173,
|
||||
0x230d2259defd,
|
||||
1,
|
||||
},
|
||||
{
|
||||
0x230d2259defd,
|
||||
0x51b492b3eff2,
|
||||
parseBin("10011"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestPolGCD(t *testing.T) {
|
||||
for i, test := range polGCDTests {
|
||||
gcd := test.f1.GCD(test.f2)
|
||||
if test.gcd != gcd {
|
||||
t.Errorf("GCD test %d (%+v) failed: got %v, wanted %v",
|
||||
i, test, gcd, test.gcd)
|
||||
}
|
||||
|
||||
gcd = test.f2.GCD(test.f1)
|
||||
if test.gcd != gcd {
|
||||
t.Errorf("GCD test %d (%+v) failed: got %v, wanted %v",
|
||||
i, test, gcd, test.gcd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var polMulModTests = []struct {
|
||||
f1 Pol
|
||||
f2 Pol
|
||||
g Pol
|
||||
mod Pol
|
||||
}{
|
||||
{
|
||||
0x1230,
|
||||
0x230,
|
||||
0x55,
|
||||
0x22,
|
||||
},
|
||||
{
|
||||
0x0eae8c07dbbb3026,
|
||||
0xd5d6db9de04771de,
|
||||
0xdd2bda3b77c9,
|
||||
0x425ae8595b7a,
|
||||
},
|
||||
}
|
||||
|
||||
func TestPolMulMod(t *testing.T) {
|
||||
for i, test := range polMulModTests {
|
||||
mod := test.f1.MulMod(test.f2, test.g)
|
||||
if mod != test.mod {
|
||||
t.Errorf("MulMod test %d (%+v) failed: got %v, wanted %v",
|
||||
i, test, mod, test.mod)
|
||||
}
|
||||
}
|
||||
}
|
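For reference, `Pol` values represent polynomials over GF(2), where bit i is the coefficient of x^i: addition is XOR and multiplication is carry-less. A small stand-alone sketch (not the package's own `Mul`) that reproduces the binary test vectors above:

```go
package main

import "fmt"

// mulGF2 is an illustrative carry-less multiplication of polynomials over
// GF(2). It is not the package's Pol.Mul, just a sketch that reproduces the
// small test vectors above (e.g. 1101 * 10 = 11010).
func mulGF2(x, y uint64) uint64 {
	var res uint64
	for i := uint(0); i < 64; i++ {
		if y&(1<<i) != 0 {
			res ^= x << i
		}
	}
	return res
}

func main() {
	fmt.Printf("%b\n", mulGF2(0b1101, 0b10)) // 11010
	fmt.Printf("%b\n", mulGF2(0b1101, 0b11)) // 10111
	fmt.Printf("%b\n", 0b1101^0b10)          // addition over GF(2) is XOR: 1111
}
```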
131
cmd/restic/acl.go
Normal file
131
cmd/restic/acl.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package main
|
||||
|
||||
// Adapted from https://github.com/maxymania/go-system/blob/master/posix_acl/posix_acl.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
aclUserOwner = 0x0001
|
||||
aclUser = 0x0002
|
||||
aclGroupOwner = 0x0004
|
||||
aclGroup = 0x0008
|
||||
aclMask = 0x0010
|
||||
aclOthers = 0x0020
|
||||
)
|
||||
|
||||
type aclSID uint64
|
||||
|
||||
type aclElem struct {
|
||||
Tag uint16
|
||||
Perm uint16
|
||||
ID uint32
|
||||
}
|
||||
|
||||
type acl struct {
|
||||
Version uint32
|
||||
List []aclElement
|
||||
}
|
||||
|
||||
type aclElement struct {
|
||||
aclSID
|
||||
Perm uint16
|
||||
}
|
||||
|
||||
func (a *aclSID) setUID(uid uint32) {
|
||||
*a = aclSID(uid) | (aclUser << 32)
|
||||
}
|
||||
func (a *aclSID) setGID(gid uint32) {
|
||||
*a = aclSID(gid) | (aclGroup << 32)
|
||||
}
|
||||
|
||||
func (a *aclSID) setType(tp int) {
|
||||
*a = aclSID(tp) << 32
|
||||
}
|
||||
|
||||
func (a aclSID) getType() int {
|
||||
return int(a >> 32)
|
||||
}
|
||||
func (a aclSID) getID() uint32 {
|
||||
return uint32(a & 0xffffffff)
|
||||
}
|
||||
func (a aclSID) String() string {
|
||||
switch a >> 32 {
|
||||
case aclUserOwner:
|
||||
return "user::"
|
||||
case aclUser:
|
||||
return fmt.Sprintf("user:%v:", a.getID())
|
||||
case aclGroupOwner:
|
||||
return "group::"
|
||||
case aclGroup:
|
||||
return fmt.Sprintf("group:%v:", a.getID())
|
||||
case aclMask:
|
||||
return "mask::"
|
||||
case aclOthers:
|
||||
return "other::"
|
||||
}
|
||||
return "?:"
|
||||
}
|
||||
|
||||
func (a aclElement) String() string {
|
||||
str := ""
|
||||
if (a.Perm & 4) != 0 {
|
||||
str += "r"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
if (a.Perm & 2) != 0 {
|
||||
str += "w"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
if (a.Perm & 1) != 0 {
|
||||
str += "x"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
return fmt.Sprintf("%v%v", a.aclSID, str)
|
||||
}
|
||||
|
||||
func (a *acl) decode(xattr []byte) {
|
||||
var elem aclElement
|
||||
ae := new(aclElem)
|
||||
nr := bytes.NewReader(xattr)
|
||||
e := binary.Read(nr, binary.LittleEndian, &a.Version)
|
||||
if e != nil {
|
||||
a.Version = 0
|
||||
return
|
||||
}
|
||||
if len(a.List) > 0 {
|
||||
a.List = a.List[:0]
|
||||
}
|
||||
for binary.Read(nr, binary.LittleEndian, ae) == nil {
|
||||
elem.aclSID = (aclSID(ae.Tag) << 32) | aclSID(ae.ID)
|
||||
elem.Perm = ae.Perm
|
||||
a.List = append(a.List, elem)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *acl) encode() []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
ae := new(aclElem)
|
||||
binary.Write(buf, binary.LittleEndian, &a.Version)
|
||||
for _, elem := range a.List {
|
||||
ae.Tag = uint16(elem.getType())
|
||||
ae.Perm = elem.Perm
|
||||
ae.ID = elem.getID()
|
||||
binary.Write(buf, binary.LittleEndian, ae)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (a *acl) String() string {
|
||||
var finalacl string
|
||||
for _, acl := range a.List {
|
||||
finalacl += acl.String() + "\n"
|
||||
}
|
||||
return finalacl
|
||||
}
|
96
cmd/restic/acl_test.go
Normal file
96
cmd/restic/acl_test.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_acl_decode(t *testing.T) {
|
||||
type args struct {
|
||||
xattr []byte
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "decode string",
|
||||
args: args{
|
||||
xattr: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
|
||||
},
|
||||
want: "user::rw-\nuser:0:rwx\nuser:65534:rwx\ngroup::rwx\nmask::rwx\nother::r--\n",
|
||||
},
|
||||
{
|
||||
name: "decode fail",
|
||||
args: args{
|
||||
xattr: []byte("abctest"),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
a := &acl{}
|
||||
a.decode(tt.args.xattr)
|
||||
if tt.want != a.String() {
|
||||
t.Errorf("acl.decode() = %v, want: %v", a.String(), tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_acl_encode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want []byte
|
||||
args []aclElement
|
||||
}{
|
||||
{
|
||||
name: "encode values",
|
||||
want: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
|
||||
args: []aclElement{
|
||||
{
|
||||
aclSID: 8589934591,
|
||||
Perm: 6,
|
||||
},
|
||||
{
|
||||
aclSID: 8589934592,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 8590000126,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 21474836479,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 73014444031,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 141733920767,
|
||||
Perm: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "encode fail",
|
||||
want: []byte{2, 0, 0, 0},
|
||||
args: []aclElement{},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
a := &acl{
|
||||
Version: 2,
|
||||
List: tt.args,
|
||||
}
|
||||
if got := a.encode(); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("acl.encode() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -20,7 +20,7 @@ var cleanupHandlers struct {
|
||||
var stderr = os.Stderr
|
||||
|
||||
func init() {
|
||||
cleanupHandlers.ch = make(chan os.Signal)
|
||||
cleanupHandlers.ch = make(chan os.Signal, 1)
|
||||
go CleanupHandler(cleanupHandlers.ch)
|
||||
signal.Notify(cleanupHandlers.ch, syscall.SIGINT)
|
||||
}
|
||||
|
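The switch to `make(chan os.Signal, 1)` above matches the documented requirement of `signal.Notify`, which sends without blocking and can therefore miss a signal on an unbuffered channel. A minimal stand-alone sketch of the pattern:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// signal.Notify sends without blocking, so the channel needs capacity
	// of at least one; an unbuffered channel may drop the signal if the
	// receiver is not ready at the exact moment it is delivered.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT)

	fmt.Println("waiting for Ctrl-C")
	<-ch
	fmt.Println("got SIGINT, cleaning up")
}
```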
@@ -5,8 +5,10 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -23,6 +25,7 @@ import (
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/textfile"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
"github.com/restic/restic/internal/ui/json"
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
)
|
||||
|
||||
@@ -32,6 +35,14 @@ var cmdBackup = &cobra.Command{
|
||||
Long: `
|
||||
The "backup" command creates a new snapshot and saves the files and directories
|
||||
given as the arguments.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
|
||||
Note that some issues such as unreadable or deleted files during backup
|
||||
currently do not result in a non-zero error exit status.
|
||||
`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
if backupOptions.Host == "" {
|
||||
@@ -68,20 +79,22 @@ given as the arguments.
|
||||
|
||||
// BackupOptions bundles all options for the backup command.
|
||||
type BackupOptions struct {
|
||||
Parent string
|
||||
Force bool
|
||||
Excludes []string
|
||||
ExcludeFiles []string
|
||||
ExcludeOtherFS bool
|
||||
ExcludeIfPresent []string
|
||||
ExcludeCaches bool
|
||||
Stdin bool
|
||||
StdinFilename string
|
||||
Tags []string
|
||||
Host string
|
||||
FilesFrom []string
|
||||
TimeStamp string
|
||||
WithAtime bool
|
||||
Parent string
|
||||
Force bool
|
||||
Excludes []string
|
||||
InsensitiveExcludes []string
|
||||
ExcludeFiles []string
|
||||
ExcludeOtherFS bool
|
||||
ExcludeIfPresent []string
|
||||
ExcludeCaches bool
|
||||
Stdin bool
|
||||
StdinFilename string
|
||||
Tags []string
|
||||
Host string
|
||||
FilesFrom []string
|
||||
TimeStamp string
|
||||
WithAtime bool
|
||||
IgnoreInode bool
|
||||
}
|
||||
|
||||
var backupOptions BackupOptions
|
||||
@@ -90,24 +103,26 @@ func init() {
|
||||
cmdRoot.AddCommand(cmdBackup)
|
||||
|
||||
f := cmdBackup.Flags()
|
||||
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
|
||||
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repo that has the same target files/directories)")
|
||||
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
|
||||
f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
|
||||
f.StringArrayVar(&backupOptions.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
|
||||
f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
|
||||
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
|
||||
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
|
||||
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file`)
|
||||
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
|
||||
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See http://bford.info/cachedir/spec.html for the Cache Directory Tagging Standard`)
|
||||
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
|
||||
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
|
||||
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "`filename` to use when reading from stdin")
|
||||
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
|
||||
|
||||
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
|
||||
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
|
||||
f.MarkDeprecated("hostname", "use --host")
|
||||
|
||||
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from file (can be combined with file args/can be specified multiple times)")
|
||||
f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
|
||||
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from `file` (can be combined with file args/can be specified multiple times)")
|
||||
f.StringVar(&backupOptions.TimeStamp, "time", "", "`time` of the backup (ex. '2012-11-01 22:08:41') (default: now)")
|
||||
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
|
||||
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
|
||||
}
|
||||
|
||||
// filterExisting returns a slice of all existing items, or an error if no
@@ -222,6 +237,10 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
opts.Excludes = append(opts.Excludes, excludes...)
}

if len(opts.InsensitiveExcludes) > 0 {
fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
}

if len(opts.Excludes) > 0 {
fs = append(fs, rejectByPattern(opts.Excludes))
}
@@ -363,7 +382,7 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup

// Find last snapshot to set it as parent, if not already set
if !opts.Force && parentID == nil {
id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, opts.Host)
id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, []string{opts.Host})
if err == nil {
parentID = &id
} else if err != restic.ErrNoSnapshotFound {
@@ -395,13 +414,43 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina

var t tomb.Tomb

term.Print("open repository\n")
if gopts.verbosity >= 2 && !gopts.JSON {
term.Print("open repository\n")
}

repo, err := OpenRepository(gopts)
if err != nil {
return err
}

p := ui.NewBackup(term, gopts.verbosity)
type ArchiveProgressReporter interface {
CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
StartFile(filename string)
CompleteBlob(filename string, bytes uint64)
ScannerError(item string, fi os.FileInfo, err error) error
ReportTotal(item string, s archiver.ScanStats)
SetMinUpdatePause(d time.Duration)
Run(ctx context.Context) error
Error(item string, fi os.FileInfo, err error) error
Finish(snapshotID restic.ID)

// ui.StdioWrapper
Stdout() io.WriteCloser
Stderr() io.WriteCloser

// ui.Message
E(msg string, args ...interface{})
P(msg string, args ...interface{})
V(msg string, args ...interface{})
VV(msg string, args ...interface{})
}

var p ArchiveProgressReporter
if gopts.JSON {
p = json.NewBackup(term, gopts.verbosity)
} else {
p = ui.NewBackup(term, gopts.verbosity)
}

// use the terminal for stdout/stderr
prevStdout, prevStderr := gopts.stdout, gopts.stderr
@@ -416,13 +465,15 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
if fps > 60 {
fps = 60
}
p.MinUpdatePause = time.Second / time.Duration(fps)
p.SetMinUpdatePause(time.Second / time.Duration(fps))
}
}

t.Go(func() error { return p.Run(t.Context(gopts.ctx)) })

p.V("lock repository")
if !gopts.JSON {
p.V("lock repository")
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
@@ -441,7 +492,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return err
}

p.V("load index files")
if !gopts.JSON {
p.V("load index files")
}
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
@@ -452,7 +505,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return err
}

if parentSnapshotID != nil {
if !gopts.JSON && parentSnapshotID != nil {
p.V("using parent snapshot %v\n", parentSnapshotID.Str())
}

@@ -476,14 +529,17 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina

var targetFS fs.FS = fs.Local{}
if opts.Stdin {
p.V("read data from stdin")
if !gopts.JSON {
p.V("read data from stdin")
}
filename := path.Join("/", opts.StdinFilename)
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: opts.StdinFilename,
Name: filename,
Mode: 0644,
ReadCloser: os.Stdin,
}
targets = []string{opts.StdinFilename}
targets = []string{filename}
}

sc := archiver.NewScanner(targetFS)
@@ -492,7 +548,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
sc.Error = p.ScannerError
sc.Result = p.ReportTotal

p.V("start scan on %v", targets)
if !gopts.JSON {
p.V("start scan on %v", targets)
}
t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) })

arch := archiver.New(repo, targetFS, archiver.Options{})
@@ -500,9 +558,10 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
arch.Error = p.Error
arch.CompleteItem = p.CompleteItemFn
arch.CompleteItem = p.CompleteItem
arch.StartFile = p.StartFile
arch.CompleteBlob = p.CompleteBlob
arch.IgnoreInode = opts.IgnoreInode

if parentSnapshotID == nil {
parentSnapshotID = &restic.ID{}
@@ -519,10 +578,14 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
uploader := archiver.IndexUploader{
Repository: repo,
Start: func() {
p.VV("uploading intermediate index")
if !gopts.JSON {
p.VV("uploading intermediate index")
}
},
Complete: func(id restic.ID) {
p.V("uploaded intermediate index %v", id.Str())
if !gopts.JSON {
p.V("uploaded intermediate index %v", id.Str())
}
},
}

@@ -530,23 +593,26 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second)
})

p.V("start backup on %v", targets)
if !gopts.JSON {
p.V("start backup on %v", targets)
}
_, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts)
if err != nil {
return errors.Fatalf("unable to save snapshot: %v", err)
}

p.Finish()
p.P("snapshot %s saved\n", id.Str())

// cleanly shutdown all running goroutines
t.Kill(nil)

// let's see if one returned an error
err = t.Wait()
if err != nil {
return err

// Report finished execution
p.Finish(id)
if !gopts.JSON {
p.P("snapshot %s saved\n", id.Str())
}

return nil
// Return error if any
return err
}
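The hunk above swaps the concrete *ui.Backup progress printer for an ArchiveProgressReporter interface so that a JSON reporter can be selected when --json is given. Below is a minimal, self-contained sketch of that selection pattern; the ProgressReporter interface and the textReporter/jsonReporter types are simplified stand-ins invented for illustration, not restic's actual types.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// ProgressReporter is a simplified stand-in for the interface the backup
// command programs against; a human-readable and a JSON implementation
// both satisfy it.
type ProgressReporter interface {
	V(msg string, args ...interface{}) // verbose message
	Finish(snapshotID string)          // final summary
}

type textReporter struct{}

func (textReporter) V(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) }
func (textReporter) Finish(id string)                  { fmt.Printf("snapshot %s saved\n", id) }

type jsonReporter struct{}

func (jsonReporter) V(msg string, args ...interface{}) {} // verbose text is suppressed in JSON mode
func (jsonReporter) Finish(id string) {
	_ = json.NewEncoder(os.Stdout).Encode(map[string]string{
		"message_type": "summary",
		"snapshot_id":  id,
	})
}

func main() {
	jsonOutput := len(os.Args) > 1 && os.Args[1] == "--json"

	// Pick the implementation once; the rest of the code only sees the interface.
	var p ProgressReporter
	if jsonOutput {
		p = jsonReporter{}
	} else {
		p = textReporter{}
	}

	p.V("open repository")
	p.Finish("1a2b3c4d")
}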
@@ -19,6 +19,11 @@ var cmdCache = &cobra.Command{
|
||||
Short: "Operate on local cache directories",
|
||||
Long: `
|
||||
The "cache" command allows listing and cleaning local cache directories.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
@@ -18,6 +18,11 @@ var cmdCat = &cobra.Command{
Short: "Print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -74,7 +79,7 @@ func runCat(gopts GlobalOptions, args []string) error {
fmt.Println(string(buf))
return nil
case "index":
buf, err := repo.LoadAndDecrypt(gopts.ctx, restic.IndexFile, id)
buf, err := repo.LoadAndDecrypt(gopts.ctx, nil, restic.IndexFile, id)
if err != nil {
return err
}
@@ -99,7 +104,7 @@ func runCat(gopts GlobalOptions, args []string) error {
return nil
case "key":
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
if err != nil {
return err
}
@@ -150,7 +155,7 @@ func runCat(gopts GlobalOptions, args []string) error {
switch tpe {
case "pack":
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
if err != nil {
return err
}
@@ -165,18 +170,15 @@ func runCat(gopts GlobalOptions, args []string) error {

case "blob":
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
list, found := repo.Index().Lookup(id, t)
_, found := repo.Index().Lookup(id, t)
if !found {
continue
}
blob := list[0]

buf := make([]byte, blob.Length)
n, err := repo.LoadBlob(gopts.ctx, t, id, buf)
buf, err := repo.LoadBlob(gopts.ctx, t, id, nil)
if err != nil {
return err
}
buf = buf[:n]

_, err = os.Stdout.Write(buf)
return err
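The cat changes above move from the old LoadBlob call, which filled a caller-sized buffer and returned a byte count, to a call that accepts nil (or a reusable buffer) and returns the slice that holds the data. A small sketch of that calling convention follows; loadBlob here is a hypothetical stand-in for the repository method, not restic's actual implementation.

package main

import "fmt"

// loadBlob sketches the convention suggested by the hunk above: the caller may
// pass nil or a previously used buffer; the callee reuses it when it is large
// enough and allocates otherwise, returning the slice that actually holds the data.
func loadBlob(id string, buf []byte) ([]byte, error) {
	data := []byte("payload for " + id) // stand-in for the decrypted blob contents
	if cap(buf) < len(data) {
		buf = make([]byte, len(data))
	}
	buf = buf[:len(data)]
	copy(buf, data)
	return buf, nil
}

func main() {
	var buf []byte
	for _, id := range []string{"aaaa", "bbbb", "cccc"} {
		var err error
		buf, err = loadBlob(id, buf) // buf is reused across iterations
		if err != nil {
			panic(err)
		}
		fmt.Println(string(buf))
	}
}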
@@ -25,6 +25,11 @@ finds. It can also be used to read all data and therefore simulate a restore.

By default, the "check" command will always load all data directly from the
repository and not use a local cache.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -67,11 +72,17 @@ func checkFlags(opts CheckOptions) error {
if dataSubset[0] == 0 || dataSubset[1] == 0 || dataSubset[0] > dataSubset[1] {
return errors.Fatalf("check flag --read-data-subset=n/t values must be positive integers, and n <= t, e.g. --read-data-subset=1/2")
}
if dataSubset[1] > totalBucketsMax {
return errors.Fatalf("check flag --read-data-subset=n/t t must be at most %d", totalBucketsMax)
}
}

return nil
}

// See doReadData in runCheck below for why this is 256.
const totalBucketsMax = 256

// stringToIntSlice converts string to []uint, using '/' as element separator
func stringToIntSlice(param string) (split []uint, err error) {
if param == "" {
@@ -257,6 +268,8 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
doReadData := func(bucket, totalBuckets uint) {
packs := restic.IDSet{}
for pack := range chkr.GetPacks() {
// If we ever check more than the first byte
// of pack, update totalBucketsMax.
if (uint(pack[0]) % totalBuckets) == (bucket - 1) {
packs.Insert(pack)
}
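The --read-data-subset handling above assigns each pack to one of t buckets using only the first byte of its ID, which is why totalBucketsMax is 256. A standalone sketch of the same selection rule:

package main

import (
	"crypto/sha256"
	"fmt"
)

// bucketOf mirrors the selection rule from the hunk above: an ID lands in
// bucket (firstByte % total) + 1, so --read-data-subset=n/t picks the IDs
// whose first byte satisfies firstByte % t == n-1. Because only one byte is
// inspected, t cannot usefully exceed 256.
func bucketOf(id [32]byte, total uint) uint {
	return uint(id[0])%total + 1
}

func main() {
	const total = 4
	counts := make(map[uint]int)
	for i := 0; i < 1000; i++ {
		id := sha256.Sum256([]byte(fmt.Sprintf("pack-%d", i)))
		counts[bucketOf(id, total)]++
	}
	for b := uint(1); b <= total; b++ {
		fmt.Printf("bucket %d/%d: %d packs\n", b, total, counts[b])
	}
}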
@@ -7,7 +7,6 @@ import (
"encoding/json"
"fmt"
"io"
"os"

"github.com/spf13/cobra"

@@ -27,7 +26,13 @@ var cmdDebugDump = &cobra.Command{
Short: "Dump data structures",
Long: `
The "dump" command dumps data structures from the repository as JSON objects. It
is used for debugging purposes only.`,
is used for debugging purposes only.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runDebugDump(globalOptions, args)
@@ -84,7 +89,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {

blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), size)
if err != nil {
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id.Str(), err)
fmt.Fprintf(globalOptions.stderr, "error for pack %v: %v\n", id.Str(), err)
return nil
}

@@ -101,13 +106,11 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
}
}

return prettyPrintJSON(os.Stdout, p)
return prettyPrintJSON(wr, p)
})

return nil
}

func dumpIndexes(repo restic.Repository) error {
func dumpIndexes(repo restic.Repository, wr io.Writer) error {
return repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
fmt.Printf("index_id: %v\n", id)

@@ -116,7 +119,7 @@ func dumpIndexes(repo restic.Repository) error {
return err
}

return idx.Dump(os.Stdout)
return idx.Dump(wr)
})
}

@@ -138,29 +141,24 @@ func runDebugDump(gopts GlobalOptions, args []string) error {
}
}

err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
}

tpe := args[0]

switch tpe {
case "indexes":
return dumpIndexes(repo)
return dumpIndexes(repo, gopts.stdout)
case "snapshots":
return debugPrintSnapshots(repo, os.Stdout)
return debugPrintSnapshots(repo, gopts.stdout)
case "packs":
return printPacks(repo, os.Stdout)
return printPacks(repo, gopts.stdout)
case "all":
fmt.Printf("snapshots:\n")
err := debugPrintSnapshots(repo, os.Stdout)
err := debugPrintSnapshots(repo, gopts.stdout)
if err != nil {
return err
}

fmt.Printf("\nindexes:\n")
err = dumpIndexes(repo)
err = dumpIndexes(repo, gopts.stdout)
if err != nil {
return err
}
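Several debug helpers above now take an io.Writer (gopts.stdout) instead of writing to os.Stdout directly, which makes them testable. A minimal sketch of that pattern, with dumpIndexes reduced to a stand-in that prints IDs:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// dumpIndexes is a simplified stand-in for the pattern in the hunk above:
// instead of printing to os.Stdout directly, the function writes to whatever
// io.Writer the caller supplies.
func dumpIndexes(wr io.Writer, ids []string) error {
	for _, id := range ids {
		if _, err := fmt.Fprintf(wr, "index_id: %v\n", id); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Production: write to stdout.
	_ = dumpIndexes(os.Stdout, []string{"deadbeef", "cafebabe"})

	// Tests: capture the output in a buffer instead.
	var buf bytes.Buffer
	_ = dumpIndexes(&buf, []string{"deadbeef"})
	fmt.Printf("captured %d bytes\n", buf.Len())
}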
@@ -26,6 +26,11 @@ directory:
* U The metadata (access mode, timestamps, ...) for the item was updated
* M The file's content was modified
* T The type was changed, e.g. a file was made a symlink

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -66,7 +71,7 @@ type Comparer struct {
type DiffStat struct {
Files, Dirs, Others int
DataBlobs, TreeBlobs int
Bytes int
Bytes uint64
}

// Add adds stats information for node to s.
@@ -141,7 +146,7 @@ func updateBlobs(repo restic.Repository, blobs restic.BlobSet, stats *DiffStat)
continue
}

stats.Bytes += int(size)
stats.Bytes += uint64(size)
}
}

@@ -1,15 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/walker"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -23,6 +27,11 @@ prints its contents to stdout.
|
||||
|
||||
The special snapshot "latest" can be used to use the latest snapshot in the
|
||||
repository.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -32,7 +41,7 @@ repository.
|
||||
|
||||
// DumpOptions collects all options for the dump command.
|
||||
type DumpOptions struct {
|
||||
Host string
|
||||
Hosts []string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
}
|
||||
@@ -43,48 +52,25 @@ func init() {
|
||||
cmdRoot.AddCommand(cmdDump)
|
||||
|
||||
flags := cmdDump.Flags()
|
||||
flags.StringVarP(&dumpOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
|
||||
flags.StringArrayVarP(&dumpOptions.Hosts, "host", "H", nil, `only consider snapshots for this host when the snapshot ID is "latest" (can be specified multiple times)`)
|
||||
flags.Var(&dumpOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"")
|
||||
flags.StringArrayVar(&dumpOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
|
||||
}
|
||||
|
||||
func splitPath(p string) []string {
|
||||
d, f := path.Split(p)
|
||||
if d == "" || d == "/" {
|
||||
if d == "" {
|
||||
return []string{f}
|
||||
}
|
||||
if d == "/" {
|
||||
return []string{d}
|
||||
}
|
||||
s := splitPath(path.Clean(d))
|
||||
return append(s, f)
|
||||
}
|
||||
|
||||
func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error {
|
||||
var buf []byte
|
||||
for _, id := range node.Content {
|
||||
size, found := repo.LookupBlobSize(id, restic.DataBlob)
|
||||
if !found {
|
||||
return errors.Errorf("id %v not found in repository", id)
|
||||
}
|
||||
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string, pathToPrint string) error {
|
||||
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) < restic.CiphertextLength(int(size)) {
|
||||
buf = restic.NewBlobBuffer(int(size))
|
||||
}
|
||||
|
||||
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
_, err = os.Stdout.Write(buf)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Write")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error {
|
||||
if tree == nil {
|
||||
return fmt.Errorf("called with a nil tree")
|
||||
}
|
||||
@@ -97,18 +83,21 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repositor
|
||||
}
|
||||
item := filepath.Join(prefix, pathComponents[0])
|
||||
for _, node := range tree.Nodes {
|
||||
if node.Name == pathComponents[0] {
|
||||
if node.Name == pathComponents[0] || pathComponents[0] == "/" {
|
||||
switch {
|
||||
case l == 1 && node.Type == "file":
|
||||
return dumpNode(ctx, repo, node)
|
||||
return getNodeData(ctx, os.Stdout, repo, node)
|
||||
case l > 1 && node.Type == "dir":
|
||||
subtree, err := repo.LoadTree(ctx, *node.Subtree)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot load subtree for %q", item)
|
||||
}
|
||||
return printFromTree(ctx, subtree, repo, item, pathComponents[1:])
|
||||
return printFromTree(ctx, subtree, repo, item, pathComponents[1:], pathToPrint)
|
||||
case node.Type == "dir":
|
||||
node.Path = pathToPrint
|
||||
return tarTree(ctx, repo, node, pathToPrint)
|
||||
case l > 1:
|
||||
return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type)
|
||||
return fmt.Errorf("%q should be a dir, but is a %q", item, node.Type)
|
||||
case node.Type != "file":
|
||||
return fmt.Errorf("%q should be a file, but is a %q", item, node.Type)
|
||||
}
|
||||
@@ -129,7 +118,7 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)
|
||||
|
||||
splittedPath := splitPath(pathToPrint)
|
||||
splittedPath := splitPath(path.Clean(pathToPrint))
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
if err != nil {
|
||||
@@ -152,9 +141,9 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
|
||||
var id restic.ID
|
||||
|
||||
if snapshotIDString == "latest" {
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Hosts)
|
||||
if err != nil {
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Hosts:%v", err, opts.Paths, opts.Hosts)
|
||||
}
|
||||
} else {
|
||||
id, err = restic.FindSnapshot(repo, snapshotIDString)
|
||||
@@ -173,10 +162,134 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
|
||||
Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err)
|
||||
}
|
||||
|
||||
err = printFromTree(ctx, tree, repo, "", splittedPath)
|
||||
err = printFromTree(ctx, tree, repo, "", splittedPath, pathToPrint)
|
||||
if err != nil {
|
||||
Exitf(2, "cannot dump file: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNodeData(ctx context.Context, output io.Writer, repo restic.Repository, node *restic.Node) error {
|
||||
var (
|
||||
buf []byte
|
||||
err error
|
||||
)
|
||||
for _, id := range node.Content {
|
||||
buf, err = repo.LoadBlob(ctx, restic.DataBlob, id, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = output.Write(buf)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Write")
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func tarTree(ctx context.Context, repo restic.Repository, rootNode *restic.Node, rootPath string) error {
|
||||
|
||||
if stdoutIsTerminal() {
|
||||
return fmt.Errorf("stdout is the terminal, please redirect output")
|
||||
}
|
||||
|
||||
tw := tar.NewWriter(os.Stdout)
|
||||
defer tw.Close()
|
||||
|
||||
// If we want to dump "/" we'll need to add the name of the first node, too
|
||||
// as it would get lost otherwise.
|
||||
if rootNode.Path == "/" {
|
||||
rootNode.Path = path.Join(rootNode.Path, rootNode.Name)
|
||||
rootPath = rootNode.Path
|
||||
}
|
||||
|
||||
// we know that rootNode is a folder and walker.Walk will already process
|
||||
// the next node, so we have to tar this one first, too
|
||||
if err := tarNode(ctx, tw, rootNode, repo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := walker.Walk(ctx, repo, *rootNode.Subtree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if node == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
node.Path = path.Join(rootPath, nodepath)
|
||||
|
||||
if node.Type == "file" || node.Type == "symlink" || node.Type == "dir" {
|
||||
err := tarNode(ctx, tw, node, repo)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func tarNode(ctx context.Context, tw *tar.Writer, node *restic.Node, repo restic.Repository) error {
|
||||
|
||||
header := &tar.Header{
|
||||
Name: node.Path,
|
||||
Size: int64(node.Size),
|
||||
Mode: int64(node.Mode),
|
||||
Uid: int(node.UID),
|
||||
Gid: int(node.GID),
|
||||
ModTime: node.ModTime,
|
||||
AccessTime: node.AccessTime,
|
||||
ChangeTime: node.ChangeTime,
|
||||
PAXRecords: parseXattrs(node.ExtendedAttributes),
|
||||
}
|
||||
|
||||
if node.Type == "symlink" {
|
||||
header.Typeflag = tar.TypeSymlink
|
||||
header.Linkname = node.LinkTarget
|
||||
}
|
||||
|
||||
if node.Type == "dir" {
|
||||
header.Typeflag = tar.TypeDir
|
||||
}
|
||||
|
||||
err := tw.WriteHeader(header)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "TarHeader ")
|
||||
}
|
||||
|
||||
return getNodeData(ctx, tw, repo, node)
|
||||
|
||||
}
|
||||
|
||||
func parseXattrs(xattrs []restic.ExtendedAttribute) map[string]string {
|
||||
tmpMap := make(map[string]string)
|
||||
|
||||
for _, attr := range xattrs {
|
||||
attrString := string(attr.Value)
|
||||
|
||||
if strings.HasPrefix(attr.Name, "system.posix_acl_") {
|
||||
na := acl{}
|
||||
na.decode(attr.Value)
|
||||
|
||||
if na.String() != "" {
|
||||
if strings.Contains(attr.Name, "system.posix_acl_access") {
|
||||
tmpMap["SCHILY.acl.access"] = na.String()
|
||||
} else if strings.Contains(attr.Name, "system.posix_acl_default") {
|
||||
tmpMap["SCHILY.acl.default"] = na.String()
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
tmpMap["SCHILY.xattr."+attr.Name] = attrString
|
||||
}
|
||||
}
|
||||
|
||||
return tmpMap
|
||||
}
|
||||
|
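The new directory support in dump streams a tar archive to stdout (tarTree/tarNode above, which also refuse to write a tar to a terminal). A self-contained sketch of the underlying archive/tar usage; the entries here are in-memory stand-ins rather than nodes loaded from a repository.

package main

import (
	"archive/tar"
	"os"
	"time"
)

// A minimal sketch of the tar-streaming approach used by the new dump code:
// write one header per entry, then the entry's contents.
func main() {
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()

	entries := []struct {
		name, content string
	}{
		{"dir/a.txt", "hello\n"},
		{"dir/b.txt", "world\n"},
	}

	for _, e := range entries {
		hdr := &tar.Header{
			Name:    e.name,
			Mode:    0644,
			Size:    int64(len(e.content)),
			ModTime: time.Now(),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			panic(err)
		}
		if _, err := tw.Write([]byte(e.content)); err != nil {
			panic(err)
		}
	}
}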
@@ -27,7 +27,13 @@ restic find --json "*.yml" "*.json"
|
||||
restic find --json --blob 420f620f b46ebe8a ddd38656
|
||||
restic find --show-pack-id --blob 420f620f
|
||||
restic find --tree 577c2bc9 f81f2e22 a62827a9
|
||||
restic find --pack 025c1d06`,
|
||||
restic find --pack 025c1d06
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runFind(findOptions, globalOptions, args)
|
||||
@@ -45,7 +51,7 @@ type FindOptions struct {
|
||||
PackID, ShowPackID bool
|
||||
CaseInsensitive bool
|
||||
ListLong bool
|
||||
Host string
|
||||
Hosts []string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
}
|
||||
@@ -62,11 +68,11 @@ func init() {
|
||||
f.BoolVar(&findOptions.BlobID, "blob", false, "pattern is a blob-ID")
|
||||
f.BoolVar(&findOptions.TreeID, "tree", false, "pattern is a tree-ID")
|
||||
f.BoolVar(&findOptions.PackID, "pack", false, "pattern is a pack-ID")
|
||||
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob)")
|
||||
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
|
||||
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
|
||||
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||
|
||||
f.StringVarP(&findOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
|
||||
f.StringArrayVarP(&findOptions.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when no snapshot ID is given (can be specified multiple times)")
|
||||
f.Var(&findOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given")
|
||||
f.StringArrayVar(&findOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
|
||||
}
|
||||
@@ -150,7 +156,7 @@ func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
|
||||
if s.hits > 0 {
|
||||
Printf(",")
|
||||
}
|
||||
Printf(string(b))
|
||||
Print(string(b))
|
||||
s.hits++
|
||||
}
|
||||
|
||||
@@ -160,9 +166,9 @@ func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) {
|
||||
Verbosef("\n")
|
||||
}
|
||||
s.oldsn = s.newsn
|
||||
Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID().Str())
|
||||
Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
|
||||
}
|
||||
Printf(formatNode(path, node, s.ListLong) + "\n")
|
||||
Println(formatNode(path, node, s.ListLong))
|
||||
}
|
||||
|
||||
func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
|
||||
@@ -201,7 +207,7 @@ func (s *statefulOutput) PrintObjectJSON(kind, id, nodepath, treeID string, sn *
|
||||
if s.hits > 0 {
|
||||
Printf(",")
|
||||
}
|
||||
Printf(string(b))
|
||||
Print(string(b))
|
||||
s.hits++
|
||||
}
|
||||
|
||||
@@ -258,9 +264,13 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
|
||||
}
|
||||
|
||||
f.out.newsn = sn
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
debug.Log("Error loading tree %v: %v", parentTreeID, err)
|
||||
|
||||
Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
|
||||
|
||||
return false, walker.SkipNode
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
@@ -340,7 +350,11 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
|
||||
f.out.newsn = sn
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
debug.Log("Error loading tree %v: %v", parentTreeID, err)
|
||||
|
||||
Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
|
||||
|
||||
return false, walker.SkipNode
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
@@ -442,27 +456,36 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Finder) findBlobsPacks(ctx context.Context) {
|
||||
func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
|
||||
idx := f.repo.Index()
|
||||
|
||||
rid, err := restic.ParseID(id)
|
||||
if err != nil {
|
||||
Printf("Note: cannot find pack for object '%s', unable to parse ID: %v\n", id, err)
|
||||
return
|
||||
}
|
||||
|
||||
blobs, found := idx.Lookup(rid, t)
|
||||
if !found {
|
||||
Printf("Object %s not found in the index\n", rid.Str())
|
||||
return
|
||||
}
|
||||
|
||||
for _, b := range blobs {
|
||||
if b.ID.Equal(rid) {
|
||||
Printf("Object belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Finder) findObjectsPacks(ctx context.Context) {
|
||||
for i := range f.blobIDs {
|
||||
rid, err := restic.ParseID(i)
|
||||
if err != nil {
|
||||
Printf("Note: cannot find pack for blob '%s', unable to parse ID: %v\n", i, err)
|
||||
continue
|
||||
}
|
||||
f.findObjectPack(ctx, i, restic.DataBlob)
|
||||
}
|
||||
|
||||
blobs, found := idx.Lookup(rid, restic.DataBlob)
|
||||
if !found {
|
||||
Printf("Blob %s not found in the index\n", rid.Str())
|
||||
continue
|
||||
}
|
||||
|
||||
for _, b := range blobs {
|
||||
if b.ID.Equal(rid) {
|
||||
Printf("Blob belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
|
||||
break
|
||||
}
|
||||
}
|
||||
for i := range f.treeIDs {
|
||||
f.findObjectPack(ctx, i, restic.TreeBlob)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -544,7 +567,7 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
|
||||
f.packsToBlobs(ctx, []string{f.pat.pattern[0]}) // TODO: support multiple packs
|
||||
}
|
||||
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) {
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Hosts, opts.Tags, opts.Paths, opts.Snapshots) {
|
||||
if f.blobIDs != nil || f.treeIDs != nil {
|
||||
if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
|
||||
return err
|
||||
@@ -557,8 +580,8 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
f.out.Finish()
|
||||
|
||||
if opts.ShowPackID && f.blobIDs != nil {
|
||||
f.findBlobsPacks(ctx)
|
||||
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
|
||||
f.findObjectsPacks(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
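The changes above replace the single --host string flag with a repeatable flag backed by pflag's StringArrayVarP. The sketch below reproduces the accumulate-on-repeat behaviour using only the standard library's flag.Value interface, purely as an illustration of what the repeated flag does.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// hostList collects every occurrence of a repeated flag, mirroring the
// behaviour restic gets from pflag's StringArrayVarP in the hunks above.
type hostList []string

func (h *hostList) String() string { return strings.Join(*h, ",") }

func (h *hostList) Set(v string) error {
	*h = append(*h, v)
	return nil
}

func main() {
	var hosts hostList
	flag.Var(&hosts, "host", "only consider snapshots for this host (can be specified multiple times)")
	flag.Parse()

	if len(hosts) == 0 {
		fmt.Println("no host filter, matching all hosts")
		return
	}
	fmt.Printf("filtering snapshots for hosts: %v\n", []string(hosts))
}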
@@ -3,10 +3,8 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"strings"
|
||||
"io"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -18,7 +16,13 @@ var cmdForget = &cobra.Command{
|
||||
The "forget" command removes snapshots according to a policy. Please note that
|
||||
this command really only deletes the snapshot object in the repository, which
|
||||
is a reference to data stored there. In order to remove this (now unreferenced)
|
||||
data after 'forget' was run successfully, see the 'prune' command. `,
|
||||
data after 'forget' was run successfully, see the 'prune' command.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runForget(forgetOptions, globalOptions, args)
|
||||
@@ -36,7 +40,7 @@ type ForgetOptions struct {
|
||||
Within restic.Duration
|
||||
KeepTags restic.TagLists
|
||||
|
||||
Host string
|
||||
Hosts []string
|
||||
Tags restic.TagLists
|
||||
Paths []string
|
||||
Compact bool
|
||||
@@ -62,8 +66,8 @@ func init() {
|
||||
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||
|
||||
f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)")
|
||||
f.StringVar(&forgetOptions.Host, "host", "", "only consider snapshots with the given `host`")
|
||||
f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname`")
|
||||
f.StringArrayVar(&forgetOptions.Hosts, "host", nil, "only consider snapshots with the given `host` (can be specified multiple times)")
|
||||
f.StringArrayVar(&forgetOptions.Hosts, "hostname", nil, "only consider snapshots with the given `hostname` (can be specified multiple times)")
|
||||
f.MarkDeprecated("hostname", "use --host")
|
||||
|
||||
f.Var(&forgetOptions.Tags, "tag", "only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)")
|
||||
@@ -90,153 +94,129 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// group by hostname and dirs
|
||||
type key struct {
|
||||
Hostname string
|
||||
Paths []string
|
||||
Tags []string
|
||||
}
|
||||
snapshotGroups := make(map[string]restic.Snapshots)
|
||||
|
||||
var GroupByTag bool
|
||||
var GroupByHost bool
|
||||
var GroupByPath bool
|
||||
var GroupOptionList []string
|
||||
|
||||
GroupOptionList = strings.Split(opts.GroupBy, ",")
|
||||
|
||||
for _, option := range GroupOptionList {
|
||||
switch option {
|
||||
case "host":
|
||||
GroupByHost = true
|
||||
case "paths":
|
||||
GroupByPath = true
|
||||
case "tags":
|
||||
GroupByTag = true
|
||||
case "":
|
||||
default:
|
||||
return errors.Fatal("unknown grouping option: '" + option + "'")
|
||||
}
|
||||
}
|
||||
|
||||
removeSnapshots := 0
|
||||
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
|
||||
if len(args) > 0 {
|
||||
// When explicit snapshots args are given, remove them immediately.
|
||||
|
||||
var snapshots restic.Snapshots
|
||||
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
||||
snapshots = append(snapshots, sn)
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
// When explicit snapshots args are given, remove them immediately.
|
||||
for _, sn := range snapshots {
|
||||
if !opts.DryRun {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
if err = repo.Backend().Remove(gopts.ctx, h); err != nil {
|
||||
return err
|
||||
}
|
||||
Verbosef("removed snapshot %v\n", sn.ID().Str())
|
||||
if !gopts.JSON {
|
||||
Verbosef("removed snapshot %v\n", sn.ID().Str())
|
||||
}
|
||||
removeSnapshots++
|
||||
} else {
|
||||
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
|
||||
if !gopts.JSON {
|
||||
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Determining grouping-keys
|
||||
var tags []string
|
||||
var hostname string
|
||||
var paths []string
|
||||
|
||||
if GroupByTag {
|
||||
tags = sn.Tags
|
||||
sort.StringSlice(tags).Sort()
|
||||
}
|
||||
if GroupByHost {
|
||||
hostname = sn.Hostname
|
||||
}
|
||||
if GroupByPath {
|
||||
paths = sn.Paths
|
||||
}
|
||||
|
||||
sort.StringSlice(sn.Paths).Sort()
|
||||
var k []byte
|
||||
var err error
|
||||
|
||||
k, err = json.Marshal(key{Tags: tags, Hostname: hostname, Paths: paths})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
snapshotGroups, _, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
policy := restic.ExpirePolicy{
|
||||
Last: opts.Last,
|
||||
Hourly: opts.Hourly,
|
||||
Daily: opts.Daily,
|
||||
Weekly: opts.Weekly,
|
||||
Monthly: opts.Monthly,
|
||||
Yearly: opts.Yearly,
|
||||
Within: opts.Within,
|
||||
Tags: opts.KeepTags,
|
||||
}
|
||||
policy := restic.ExpirePolicy{
|
||||
Last: opts.Last,
|
||||
Hourly: opts.Hourly,
|
||||
Daily: opts.Daily,
|
||||
Weekly: opts.Weekly,
|
||||
Monthly: opts.Monthly,
|
||||
Yearly: opts.Yearly,
|
||||
Within: opts.Within,
|
||||
Tags: opts.KeepTags,
|
||||
}
|
||||
|
||||
if policy.Empty() && len(args) == 0 {
|
||||
Verbosef("no policy was specified, no snapshots will be removed\n")
|
||||
}
|
||||
if policy.Empty() && len(args) == 0 {
|
||||
if !gopts.JSON {
|
||||
Verbosef("no policy was specified, no snapshots will be removed\n")
|
||||
}
|
||||
}
|
||||
|
||||
if !policy.Empty() {
|
||||
Verbosef("Applying Policy: %v\n", policy)
|
||||
|
||||
for k, snapshotGroup := range snapshotGroups {
|
||||
var key key
|
||||
if json.Unmarshal([]byte(k), &key) != nil {
|
||||
return err
|
||||
if !policy.Empty() {
|
||||
if !gopts.JSON {
|
||||
Verbosef("Applying Policy: %v\n", policy)
|
||||
}
|
||||
|
||||
// Info
|
||||
Verbosef("snapshots")
|
||||
var infoStrings []string
|
||||
if GroupByTag {
|
||||
infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
|
||||
}
|
||||
if GroupByHost {
|
||||
infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
|
||||
}
|
||||
if GroupByPath {
|
||||
infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
|
||||
}
|
||||
if infoStrings != nil {
|
||||
Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
|
||||
}
|
||||
Verbosef(":\n\n")
|
||||
var jsonGroups []*ForgetGroup
|
||||
|
||||
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
||||
|
||||
if len(keep) != 0 && !gopts.Quiet {
|
||||
Printf("keep %d snapshots:\n", len(keep))
|
||||
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
|
||||
if len(remove) != 0 && !gopts.Quiet {
|
||||
Printf("remove %d snapshots:\n", len(remove))
|
||||
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
|
||||
removeSnapshots += len(remove)
|
||||
|
||||
if !opts.DryRun {
|
||||
for _, sn := range remove {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
err = repo.Backend().Remove(gopts.ctx, h)
|
||||
for k, snapshotGroup := range snapshotGroups {
|
||||
if gopts.Verbose >= 1 && !gopts.JSON {
|
||||
err = PrintSnapshotGroupHeader(gopts.stdout, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var key restic.SnapshotGroupKey
|
||||
if json.Unmarshal([]byte(k), &key) != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var fg ForgetGroup
|
||||
fg.Tags = key.Tags
|
||||
fg.Host = key.Hostname
|
||||
fg.Paths = key.Paths
|
||||
|
||||
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
||||
|
||||
if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||
Printf("keep %d snapshots:\n", len(keep))
|
||||
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
addJSONSnapshots(&fg.Keep, keep)
|
||||
|
||||
if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||
Printf("remove %d snapshots:\n", len(remove))
|
||||
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
addJSONSnapshots(&fg.Remove, remove)
|
||||
|
||||
fg.Reasons = reasons
|
||||
|
||||
jsonGroups = append(jsonGroups, &fg)
|
||||
|
||||
removeSnapshots += len(remove)
|
||||
|
||||
if !opts.DryRun {
|
||||
for _, sn := range remove {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
err = repo.Backend().Remove(gopts.ctx, h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gopts.JSON {
|
||||
err = printJSONForget(gopts.stdout, jsonGroups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removeSnapshots > 0 && opts.Prune {
|
||||
Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
if !gopts.JSON {
|
||||
Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
}
|
||||
if !opts.DryRun {
|
||||
return pruneRepository(gopts, repo)
|
||||
}
|
||||
@@ -244,3 +224,28 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForgetGroup helps to print what is forgotten in JSON.
|
||||
type ForgetGroup struct {
|
||||
Tags []string `json:"tags"`
|
||||
Host string `json:"host"`
|
||||
Paths []string `json:"paths"`
|
||||
Keep []Snapshot `json:"keep"`
|
||||
Remove []Snapshot `json:"remove"`
|
||||
Reasons []restic.KeepReason `json:"reasons"`
|
||||
}
|
||||
|
||||
func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) {
|
||||
for _, sn := range list {
|
||||
k := Snapshot{
|
||||
Snapshot: sn,
|
||||
ID: sn.ID(),
|
||||
ShortID: sn.ID().Str(),
|
||||
}
|
||||
*js = append(*js, k)
|
||||
}
|
||||
}
|
||||
|
||||
func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error {
|
||||
return json.NewEncoder(stdout).Encode(forgets)
|
||||
}
|
||||
|
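The forget command above gains a --json mode that reports each group's keep/remove decision through a ForgetGroup value. A trimmed-down sketch of that encoding; forgetGroup here uses plain string IDs instead of restic's full snapshot objects.

package main

import (
	"encoding/json"
	"os"
)

// forgetGroup is a simplified version of the ForgetGroup struct from the
// hunk above; the snapshot entries are plain strings here.
type forgetGroup struct {
	Host   string   `json:"host"`
	Paths  []string `json:"paths"`
	Tags   []string `json:"tags"`
	Keep   []string `json:"keep"`
	Remove []string `json:"remove"`
}

func main() {
	groups := []forgetGroup{
		{
			Host:   "web1",
			Paths:  []string{"/srv"},
			Keep:   []string{"1a2b3c4d"},
			Remove: []string{"9f8e7d6c", "5a4b3c2d"},
		},
	}

	// One JSON document describing every group, written to stdout.
	if err := json.NewEncoder(os.Stdout).Encode(groups); err != nil {
		panic(err)
	}
}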
@@ -12,8 +12,13 @@ var cmdGenerate = &cobra.Command{
Use: "generate [command]",
Short: "Generate manual pages and auto-completion files (bash, zsh)",
Long: `
The "generate" command writes automatically generated files like the man pages
The "generate" command writes automatically generated files (like the man pages
and the auto-completion files for bash and zsh).

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: runGenerate,

@@ -12,6 +12,11 @@ var cmdInit = &cobra.Command{
Short: "Initialize a new repository",
Long: `
The "init" command initializes a new repository.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -20,6 +20,11 @@ var cmdKey = &cobra.Command{
Short: "Manage keys (passwords)",
Long: `
The "key" command manages keys (passwords) for accessing the repository.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -15,6 +15,11 @@ var cmdList = &cobra.Command{
Short: "List objects in the repository",
Long: `
The "list" command allows listing objects in the repository based on type.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -33,6 +33,11 @@ will be listed. If the --recursive flag is used, then the filter
|
||||
will allow traversing into matching directories' subfolders.
|
||||
Any directory paths specified must be absolute (starting with
|
||||
a path separator); paths use the forward slash '/' as separator.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -43,7 +48,7 @@ a path separator); paths use the forward slash '/' as separator.
|
||||
// LsOptions collects all options for the ls command.
|
||||
type LsOptions struct {
|
||||
ListLong bool
|
||||
Host string
|
||||
Hosts []string
|
||||
Tags restic.TagLists
|
||||
Paths []string
|
||||
Recursive bool
|
||||
@@ -56,7 +61,7 @@ func init() {
|
||||
|
||||
flags := cmdLs.Flags()
|
||||
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||
flags.StringVarP(&lsOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
|
||||
flags.StringArrayVarP(&lsOptions.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when no snapshot ID is given (can be specified multiple times)")
|
||||
flags.Var(&lsOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot ID is given")
|
||||
flags.StringArrayVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given")
|
||||
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
|
||||
@@ -84,7 +89,7 @@ type lsNode struct {
|
||||
}
|
||||
|
||||
func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
|
||||
if len(args) == 0 && opts.Host == "" && len(opts.Tags) == 0 && len(opts.Paths) == 0 {
|
||||
if len(args) == 0 && len(opts.Hosts) == 0 && len(opts.Tags) == 0 && len(opts.Paths) == 0 {
|
||||
return errors.Fatal("Invalid arguments, either give one or more snapshot IDs or set filters.")
|
||||
}
|
||||
|
||||
@@ -186,7 +191,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args[:1]) {
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Hosts, opts.Tags, opts.Paths, args[:1]) {
|
||||
printSnapshot(sn)
|
||||
|
||||
err := walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
|
@@ -13,6 +13,11 @@ var cmdMigrate = &cobra.Command{
Long: `
The "migrate" command applies migrations to a repository. When no migration
name is explicitly given, a list of migrations that can be applied is printed.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -44,6 +44,11 @@ You need to specify a sample format for exactly the following timestamp:
|
||||
|
||||
For details please see the documentation for time.Format() at:
|
||||
https://godoc.org/time#Time.Format
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -54,10 +59,9 @@ For details please see the documentation for time.Format() at:
|
||||
// MountOptions collects all options for the mount command.
|
||||
type MountOptions struct {
|
||||
OwnerRoot bool
|
||||
AllowRoot bool
|
||||
AllowOther bool
|
||||
NoDefaultPermissions bool
|
||||
Host string
|
||||
Hosts []string
|
||||
Tags restic.TagLists
|
||||
Paths []string
|
||||
SnapshotTemplate string
|
||||
@@ -70,11 +74,10 @@ func init() {
|
||||
|
||||
mountFlags := cmdMount.Flags()
|
||||
mountFlags.BoolVar(&mountOptions.OwnerRoot, "owner-root", false, "use 'root' as the owner of files and dirs")
|
||||
mountFlags.BoolVar(&mountOptions.AllowRoot, "allow-root", false, "allow root user to access the data in the mounted directory")
|
||||
mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory")
|
||||
mountFlags.BoolVar(&mountOptions.NoDefaultPermissions, "no-default-permissions", false, "for 'allow-other', ignore Unix permissions and allow users to read all snapshot files")
|
||||
|
||||
mountFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`)
|
||||
mountFlags.StringArrayVarP(&mountOptions.Hosts, "host", "H", nil, `only consider snapshots for this host (can be specified multiple times)`)
|
||||
mountFlags.Var(&mountOptions.Tags, "tag", "only consider snapshots which include this `taglist`")
|
||||
mountFlags.StringArrayVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`")
|
||||
|
||||
@@ -114,10 +117,6 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
systemFuse.FSName("restic"),
|
||||
}
|
||||
|
||||
if opts.AllowRoot {
|
||||
mountOptions = append(mountOptions, systemFuse.AllowRoot())
|
||||
}
|
||||
|
||||
if opts.AllowOther {
|
||||
mountOptions = append(mountOptions, systemFuse.AllowOther())
|
||||
|
||||
@@ -138,7 +137,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
|
||||
cfg := fuse.Config{
|
||||
OwnerIsRoot: opts.OwnerRoot,
|
||||
Host: opts.Host,
|
||||
Hosts: opts.Hosts,
|
||||
Tags: opts.Tags,
|
||||
Paths: opts.Paths,
|
||||
SnapshotTemplate: opts.SnapshotTemplate,
|
||||
@@ -149,7 +148,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
}
|
||||
|
||||
Printf("Now serving the repository at %s\n", mountpoint)
|
||||
Printf("Don't forget to umount after quitting!\n")
|
||||
Printf("When finished, quit with Ctrl-c or umount the mountpoint.\n")
|
||||
|
||||
debug.Log("serving mount at %v", mountpoint)
|
||||
err = fs.Serve(c, root)
|
||||
|
@@ -13,6 +13,11 @@ var optionsCmd = &cobra.Command{
Short: "Print list of extended options",
Long: `
The "options" command prints a list of extended options.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
Hidden: true,
DisableAutoGenTag: true,

@@ -19,6 +19,11 @@ var cmdPrune = &cobra.Command{
Long: `
The "prune" command checks the repository and removes data that is not
referenced and therefore not needed any more.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -16,6 +16,11 @@ var cmdRebuildIndex = &cobra.Command{
Long: `
The "rebuild-index" command creates a new index based on the pack files in the
repository.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -57,11 +62,17 @@ func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks resti
}

bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
idx, _, err := index.New(ctx, repo, ignorePacks, bar)
idx, invalidFiles, err := index.New(ctx, repo, ignorePacks, bar)
if err != nil {
return err
}

if globalOptions.verbosity >= 2 {
for _, id := range invalidFiles {
Printf("skipped incomplete pack file: %v\n", id)
}
}

Verbosef("finding old index files\n")

var supersedes restic.IDs

@@ -16,6 +16,11 @@ var cmdRecover = &cobra.Command{
The "recover" command builds a new snapshot from all directories it can find in
the raw data of the repository. It can be used if, for example, a snapshot has
been removed by accident with "forget".

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -1,6 +1,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/filter"
|
||||
@@ -19,6 +21,11 @@ a directory.
|
||||
|
||||
The special snapshot "latest" can be used to restore the latest snapshot in the
|
||||
repository.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -28,13 +35,15 @@ repository.
|
||||
|
||||
// RestoreOptions collects all options for the restore command.
|
||||
type RestoreOptions struct {
|
||||
Exclude []string
|
||||
Include []string
|
||||
Target string
|
||||
Host string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
Verify bool
|
||||
Exclude []string
|
||||
InsensitiveExclude []string
|
||||
Include []string
|
||||
InsensitiveInclude []string
|
||||
Target string
|
||||
Hosts []string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
Verify bool
|
||||
}
|
||||
|
||||
var restoreOptions RestoreOptions
|
||||
@@ -44,10 +53,12 @@ func init() {
|
||||
|
||||
flags := cmdRestore.Flags()
|
||||
flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
|
||||
flags.StringArrayVar(&restoreOptions.InsensitiveExclude, "iexclude", nil, "same as `--exclude` but ignores the casing of filenames")
|
||||
flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
|
||||
flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as `--include` but ignores the casing of filenames")
|
||||
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
|
||||
|
||||
flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
|
||||
flags.StringArrayVarP(&restoreOptions.Hosts, "host", "H", nil, `only consider snapshots for this host when the snapshot ID is "latest" (can be specified multiple times)`)
|
||||
flags.Var(&restoreOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"")
|
||||
flags.StringArrayVar(&restoreOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
|
||||
flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content")
|
||||
@@ -55,6 +66,16 @@ func init() {
|
||||
|
||||
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
ctx := gopts.ctx
|
||||
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
|
||||
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
|
||||
|
||||
for i, str := range opts.InsensitiveExclude {
|
||||
opts.InsensitiveExclude[i] = strings.ToLower(str)
|
||||
}
|
||||
|
||||
for i, str := range opts.InsensitiveInclude {
|
||||
opts.InsensitiveInclude[i] = strings.ToLower(str)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(args) == 0:
|
||||
@@ -67,7 +88,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("please specify a directory to restore to (--target)")
|
||||
}
|
||||
|
||||
if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
|
||||
if hasExcludes && hasIncludes {
|
||||
return errors.Fatal("exclude and include patterns are mutually exclusive")
|
||||
}
|
||||
|
||||
@@ -96,9 +117,9 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
var id restic.ID
|
||||
|
||||
if snapshotIDString == "latest" {
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Hosts)
|
||||
if err != nil {
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Hosts:%v", err, opts.Paths, opts.Hosts)
|
||||
}
|
||||
} else {
|
||||
id, err = restic.FindSnapshot(repo, snapshotIDString)
|
||||
@@ -125,11 +146,16 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
Warnf("error for exclude pattern: %v", err)
|
||||
}
|
||||
|
||||
matchedInsensitive, _, err := filter.List(opts.InsensitiveExclude, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
// An exclude filter is basically a 'wildcard but foo',
|
||||
// so even if a childMayMatch, other children of a dir may not,
|
||||
// therefore childMayMatch does not matter, but we should not go down
|
||||
// unless the dir is selected for restore
|
||||
selectedForRestore = !matched
|
||||
selectedForRestore = !matched && !matchedInsensitive
|
||||
childMayBeSelected = selectedForRestore && node.Type == "dir"
|
||||
|
||||
return selectedForRestore, childMayBeSelected
|
||||
@@ -141,15 +167,20 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
Warnf("error for include pattern: %v", err)
|
||||
}
|
||||
|
||||
selectedForRestore = matched
|
||||
childMayBeSelected = childMayMatch && node.Type == "dir"
|
||||
matchedInsensitive, childMayMatchInsensitive, err := filter.List(opts.InsensitiveInclude, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
selectedForRestore = matched || matchedInsensitive
|
||||
childMayBeSelected = (childMayMatch || childMayMatchInsensitive) && node.Type == "dir"
|
||||
|
||||
return selectedForRestore, childMayBeSelected
|
||||
}
|
||||
|
||||
if len(opts.Exclude) > 0 {
|
||||
if hasExcludes {
|
||||
res.SelectFilter = selectExcludeFilter
|
||||
} else if len(opts.Include) > 0 {
|
||||
} else if hasIncludes {
|
||||
res.SelectFilter = selectIncludeFilter
|
||||
}
|
||||
|
||||
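The restore changes above add --iinclude and --iexclude by lower-casing both the patterns and the candidate paths before matching. The sketch below shows the idea with path.Match as a simpler stand-in for restic's internal/filter package, which supports richer ** patterns.

package main

import (
	"fmt"
	"path"
	"strings"
)

// matchInsensitive lower-cases both the patterns and the item before matching,
// which is the essence of the --iinclude/--iexclude handling above.
func matchInsensitive(patterns []string, item string) bool {
	item = strings.ToLower(item)
	for _, p := range patterns {
		if ok, err := path.Match(strings.ToLower(p), item); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	patterns := []string{"*.JPG", "/home/*/Documents"}
	for _, item := range []string{"summer.JPG", "/home/alice/Documents", "/etc/passwd"} {
		fmt.Printf("%-25s matched=%v\n", item, matchInsensitive(patterns, item))
	}
}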
|
@@ -18,6 +18,11 @@ The command "self-update" downloads the latest stable release of restic from
|
||||
GitHub and replaces the currently running binary. After download, the
|
||||
authenticity of the binary is verified using the GPG signature on the release
|
||||
files.
|
||||
|
||||
EXIT STATUS
|
||||
===========
|
||||
|
||||
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
@@ -18,6 +18,11 @@ var cmdSnapshots = &cobra.Command{
Short: "List all snapshots",
Long: `
The "snapshots" command lists all snapshots stored in the repository.
+
+ EXIT STATUS
+ ===========
+
+ Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -27,11 +32,12 @@ The "snapshots" command lists all snapshots stored in the repository.
// SnapshotOptions bundles all options for the snapshots command.
type SnapshotOptions struct {
- Host string
+ Hosts []string
Tags restic.TagLists
Paths []string
Compact bool
Last bool
+ GroupBy string
}

var snapshotOptions SnapshotOptions

@@ -40,11 +46,12 @@ func init() {
cmdRoot.AddCommand(cmdSnapshots)

f := cmdSnapshots.Flags()
- f.StringVarP(&snapshotOptions.Host, "host", "H", "", "only consider snapshots for this `host`")
+ f.StringArrayVarP(&snapshotOptions.Hosts, "host", "H", nil, "only consider snapshots for this `host` (can be specified multiple times)")
f.Var(&snapshotOptions.Tags, "tag", "only consider snapshots which include this `taglist` (can be specified multiple times)")
f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
+ f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "string for grouping snapshots by host,paths,tags")
}

func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
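The switch from StringVarP to StringArrayVarP is what makes -H/--host repeatable: an array flag appends every occurrence instead of keeping only the last value. A minimal sketch using github.com/spf13/pflag (the flag library underneath cobra) directly, with made-up flag values:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        var hosts []string

        fs := pflag.NewFlagSet("snapshots", pflag.ContinueOnError)
        // repeatable flag: every -H/--host occurrence is appended to hosts
        fs.StringArrayVarP(&hosts, "host", "H", nil, "only consider snapshots for this host (can be specified multiple times)")

        if err := fs.Parse([]string{"-H", "alpha", "--host", "beta"}); err != nil {
            panic(err)
        }
        fmt.Println(hosts) // [alpha beta]
    }

Unlike StringSliceVarP, StringArrayVarP does not split values on commas, so each hostname is kept verbatim.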
@@ -64,25 +71,41 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()

- var list restic.Snapshots
- for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
- list = append(list, sn)
+ var snapshots restic.Snapshots
+ for sn := range FindFilteredSnapshots(ctx, repo, opts.Hosts, opts.Tags, opts.Paths, args) {
+ snapshots = append(snapshots, sn)
}
+ snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
+ if err != nil {
+ return err
+ }

- if opts.Last {
- list = FilterLastSnapshots(list)
+ for k, list := range snapshotGroups {
+ if opts.Last {
+ list = FilterLastSnapshots(list)
}
+ sort.Sort(sort.Reverse(list))
+ snapshotGroups[k] = list
+ }

- sort.Sort(sort.Reverse(list))

if gopts.JSON {
- err := printSnapshotsJSON(gopts.stdout, list)
+ err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
if err != nil {
- Warnf("error printing snapshot: %v\n", err)
+ Warnf("error printing snapshots: %v\n", err)
}
return nil
}
- PrintSnapshots(gopts.stdout, list, nil, opts.Compact)

+ for k, list := range snapshotGroups {
+ if grouped {
+ err := PrintSnapshotGroupHeader(gopts.stdout, k)
+ if err != nil {
+ Warnf("error printing snapshots: %v\n", err)
+ return nil
+ }
+ }
+ PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
+ }

return nil
}
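GroupSnapshots, as used above, hands runSnapshots a map keyed by a JSON-encoded group key, since a struct containing slices (paths, tags) cannot itself be used as a Go map key. A standalone sketch of that pattern with illustrative types, not restic's own:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // groupKey is an illustrative stand-in for restic.SnapshotGroupKey.
    type groupKey struct {
        Hostname string   `json:"hostname"`
        Paths    []string `json:"paths"`
    }

    type snapshot struct {
        Host string
        Path string
    }

    func main() {
        snapshots := []snapshot{
            {Host: "alpha", Path: "/home"},
            {Host: "alpha", Path: "/home"},
            {Host: "beta", Path: "/srv"},
        }

        // the JSON string form of the composite key serves as the map key
        groups := make(map[string][]snapshot)
        for _, sn := range snapshots {
            k, err := json.Marshal(groupKey{Hostname: sn.Host, Paths: []string{sn.Path}})
            if err != nil {
                return
            }
            groups[string(k)] = append(groups[string(k)], sn)
        }

        // unmarshal the key again when printing a group header
        for k, list := range groups {
            var key groupKey
            if err := json.Unmarshal([]byte(k), &key); err != nil {
                return
            }
            fmt.Printf("snapshots for (host [%s], paths %v): %d\n", key.Hostname, key.Paths, len(list))
        }
    }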
@@ -223,6 +246,42 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
tab.Write(stdout)
}

+ // PrintSnapshotGroupHeader prints which group of the group-by option the
+ // following snapshots belong to.
+ // Prints nothing if we did not group at all.
+ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
+ var key restic.SnapshotGroupKey
+ var err error
+
+ err = json.Unmarshal([]byte(groupKeyJSON), &key)
+ if err != nil {
+ return err
+ }
+
+ if key.Hostname == "" && key.Tags == nil && key.Paths == nil {
+ return nil
+ }
+
+ // Info
+ fmt.Fprintf(stdout, "snapshots")
+ var infoStrings []string
+ if key.Hostname != "" {
+ infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
+ }
+ if key.Tags != nil {
+ infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
+ }
+ if key.Paths != nil {
+ infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
+ }
+ if infoStrings != nil {
+ fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", "))
+ }
+ fmt.Fprintf(stdout, ":\n")
+
+ return nil
+ }
+
// Snapshot helps to print Snapshots as JSON with their ID included.
type Snapshot struct {
*restic.Snapshot
@@ -231,19 +290,58 @@ type Snapshot struct {
ShortID string `json:"short_id"`
}

- // printSnapshotsJSON writes the JSON representation of list to stdout.
- func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
+ // SnapshotGroup helps to print SnapshotGroups as JSON with their GroupReasons included.
+ type SnapshotGroup struct {
+ GroupKey restic.SnapshotGroupKey `json:"group_key"`
+ Snapshots []Snapshot `json:"snapshots"`
+ }
+
+ // printSnapshotGroupJSON writes the JSON representation of the snapshot groups to stdout.
+ func printSnapshotGroupJSON(stdout io.Writer, snGroups map[string]restic.Snapshots, grouped bool) error {
+ if grouped {
+ var snapshotGroups []SnapshotGroup
+
+ for k, list := range snGroups {
+ var key restic.SnapshotGroupKey
+ var err error
+ var snapshots []Snapshot
+
+ err = json.Unmarshal([]byte(k), &key)
+ if err != nil {
+ return err
+ }
+
+ for _, sn := range list {
+ k := Snapshot{
+ Snapshot: sn,
+ ID: sn.ID(),
+ ShortID: sn.ID().Str(),
+ }
+ snapshots = append(snapshots, k)
+ }
+
+ group := SnapshotGroup{
+ GroupKey: key,
+ Snapshots: snapshots,
+ }
+ snapshotGroups = append(snapshotGroups, group)
+ }
+
+ return json.NewEncoder(stdout).Encode(snapshotGroups)
+ }
+
+ // Old behavior
var snapshots []Snapshot

- for _, sn := range list {
- k := Snapshot{
- Snapshot: sn,
- ID: sn.ID(),
- ShortID: sn.ID().Str(),
+ for _, list := range snGroups {
+ for _, sn := range list {
+ k := Snapshot{
+ Snapshot: sn,
+ ID: sn.ID(),
+ ShortID: sn.ID().Str(),
+ }
+ snapshots = append(snapshots, k)
+ }
- snapshots = append(snapshots, k)
}

return json.NewEncoder(stdout).Encode(snapshots)
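Both JSON wrappers above lean on struct embedding: the embedded snapshot's fields and the extra id/short_id fields end up at the same JSON level, and the group wrapper nests those entries under group_key/snapshots. A minimal sketch with stand-in types and made-up IDs:

    package main

    import (
        "encoding/json"
        "os"
    )

    // snapshot stands in for *restic.Snapshot; the real type has many more fields.
    type snapshot struct {
        Hostname string   `json:"hostname"`
        Paths    []string `json:"paths"`
    }

    // snapshotWithID mirrors the Snapshot wrapper: the embedded struct's fields
    // and the extra id/short_id fields are emitted at the same JSON level.
    type snapshotWithID struct {
        *snapshot
        ID      string `json:"id"`
        ShortID string `json:"short_id"`
    }

    // snapshotGroup mirrors SnapshotGroup: a group_key plus the wrapped snapshots.
    type snapshotGroup struct {
        GroupKey  map[string]string `json:"group_key"`
        Snapshots []snapshotWithID  `json:"snapshots"`
    }

    func main() {
        sn := &snapshot{Hostname: "alpha", Paths: []string{"/home"}}
        group := snapshotGroup{
            GroupKey: map[string]string{"hostname": "alpha"},
            Snapshots: []snapshotWithID{{
                snapshot: sn,
                ID:       "0123456789abcdef", // made-up IDs for illustration
                ShortID:  "01234567",
            }},
        }

        enc := json.NewEncoder(os.Stdout)
        enc.SetIndent("", "  ")
        enc.Encode(group)
    }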
@@ -36,7 +36,13 @@ The modes are:
* raw-data: Counts the size of blobs in the repository, regardless of
how many files reference them.
* blobs-per-file: A combination of files-by-contents and raw-data.
- * Refer to the online manual for more details about each mode.
+
+ Refer to the online manual for more details about each mode.
+
+ EXIT STATUS
+ ===========
+
+ Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -48,7 +54,7 @@ func init() {
cmdRoot.AddCommand(cmdStats)
f := cmdStats.Flags()
f.StringVar(&countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file, or raw-data")
- f.StringVarP(&snapshotByHost, "host", "H", "", "filter latest snapshot by this hostname")
+ f.StringArrayVarP(&snapshotByHosts, "host", "H", nil, "filter latest snapshot by this hostname (can be specified multiple times)")
}

func runStats(gopts GlobalOptions, args []string) error {
@@ -83,10 +89,11 @@ func runStats(gopts GlobalOptions, args []string) error {

// create a container for the stats (and other needed state)
stats := &statsContainer{
- uniqueFiles: make(map[fileID]struct{}),
- fileBlobs: make(map[string]restic.IDSet),
- blobs: restic.NewBlobSet(),
- blobsSeen: restic.NewBlobSet(),
+ uniqueFiles: make(map[fileID]struct{}),
+ uniqueInodes: make(map[uint64]struct{}),
+ fileBlobs: make(map[string]restic.IDSet),
+ blobs: restic.NewBlobSet(),
+ blobsSeen: restic.NewBlobSet(),
}

if snapshotIDString != "" {

@@ -94,7 +101,7 @@ func runStats(gopts GlobalOptions, args []string) error {

var sID restic.ID
if snapshotIDString == "latest" {
- sID, err = restic.FindLatestSnapshot(ctx, repo, []string{}, []restic.TagList{}, snapshotByHost)
+ sID, err = restic.FindLatestSnapshot(ctx, repo, []string{}, []restic.TagList{}, snapshotByHosts)
if err != nil {
return errors.Fatalf("latest snapshot for criteria not found: %v", err)
}

@@ -111,6 +118,9 @@ func runStats(gopts GlobalOptions, args []string) error {
}

err = statsWalkSnapshot(ctx, snapshot, repo, stats)
if err != nil {
return fmt.Errorf("error walking snapshot: %v", err)
}
} else {
// iterate every snapshot in the repo
err = repo.List(ctx, restic.SnapshotFile, func(snapshotID restic.ID, size int64) error {

@@ -184,7 +194,7 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
}

func statsWalkTree(repo restic.Repository, stats *statsContainer) walker.WalkFunc {
- return func(_ restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
+ return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
if nodeErr != nil {
return true, nodeErr
}

@@ -219,7 +229,7 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer) walker.WalkFun
// is always a data blob since we're accessing it via a file's Content array
blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob)
if !found {
- return true, fmt.Errorf("blob %s not found for tree %s", blobID, *node.Subtree)
+ return true, fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID)
}

// count the blob's size, then add this blob by this

@@ -238,8 +248,16 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer) walker.WalkFun
// as this is a file in the snapshot, we can simply count its
// size without worrying about uniqueness, since duplicate files
// will still be restored
- stats.TotalSize += node.Size
stats.TotalFileCount++

+ // if inodes are present, only count each inode once
+ // (hard links do not increase restore size)
+ if _, ok := stats.uniqueInodes[node.Inode]; !ok || node.Inode == 0 {
+ stats.uniqueInodes[node.Inode] = struct{}{}
+ stats.TotalSize += node.Size
+ }

return false, nil
}

return true, nil

@@ -292,6 +310,10 @@ type statsContainer struct {
// contents (hashed sequence of content blob IDs)
uniqueFiles map[fileID]struct{}

+ // uniqueInodes marks visited files according to their
+ // inode # (hashed sequence of inode numbers)
+ uniqueInodes map[uint64]struct{}
+
// fileBlobs maps a file name (path) to the set of
// blobs that have been seen as a part of the file
fileBlobs map[string]restic.IDSet
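The new uniqueInodes map is what keeps hard links from inflating the restore-size estimate: every file still increments the file count, but a given inode's size is only added once, and inode 0 (no inode information) is always counted. A standalone sketch of that bookkeeping with made-up nodes:

    package main

    import "fmt"

    type node struct {
        Name  string
        Inode uint64
        Size  uint64
    }

    func main() {
        nodes := []node{
            {Name: "a", Inode: 42, Size: 1 << 20},
            {Name: "a-hardlink", Inode: 42, Size: 1 << 20}, // same inode, not counted again
            {Name: "b", Inode: 7, Size: 2 << 20},
            {Name: "pipe", Inode: 0, Size: 0}, // no inode info, counted unconditionally
        }

        uniqueInodes := make(map[uint64]struct{})
        var totalSize uint64

        for _, n := range nodes {
            // mirrors the condition in the diff: count if unseen or if inode is 0
            if _, ok := uniqueInodes[n.Inode]; !ok || n.Inode == 0 {
                uniqueInodes[n.Inode] = struct{}{}
                totalSize += n.Size
            }
        }

        fmt.Printf("restore size: %d bytes\n", totalSize) // 3 MiB; the hard link is not double-counted
    }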
@@ -313,7 +335,7 @@ var (

// snapshotByHost is the host to filter latest
// snapshot by, if given by user
- snapshotByHost string
+ snapshotByHosts []string
)

const (

@@ -21,6 +21,11 @@ You can either set/replace the entire set of tags on a snapshot, or
add tags to/remove tags from the existing set.

When no snapshot-ID is given, all snapshots matching the host, tag and path filter criteria are modified.
+
+ EXIT STATUS
+ ===========
+
+ Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -30,7 +35,7 @@ When no snapshot-ID is given, all snapshots matching the host, tag and path filt
// TagOptions bundles all options for the 'tag' command.
type TagOptions struct {
- Host string
+ Hosts []string
Paths []string
Tags restic.TagLists
SetTags []string

@@ -48,7 +53,7 @@ func init() {
tagFlags.StringSliceVar(&tagOptions.AddTags, "add", nil, "`tag` which will be added to the existing tags (can be given multiple times)")
tagFlags.StringSliceVar(&tagOptions.RemoveTags, "remove", nil, "`tag` which will be removed from the existing tags (can be given multiple times)")

- tagFlags.StringVarP(&tagOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
+ tagFlags.StringArrayVarP(&tagOptions.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when no snapshot ID is given (can be specified multiple times)")
tagFlags.Var(&tagOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given")
tagFlags.StringArrayVar(&tagOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
}

@@ -124,7 +129,7 @@ func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
changeCnt := 0
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
- for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
+ for sn := range FindFilteredSnapshots(ctx, repo, opts.Hosts, opts.Tags, opts.Paths, args) {
changed, err := changeTags(ctx, repo, sn, opts.SetTags, opts.AddTags, opts.RemoveTags)
if err != nil {
Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)
@@ -10,6 +10,11 @@ var unlockCmd = &cobra.Command{
Short: "Remove locks other processes created",
Long: `
The "unlock" command removes stale locks that have been created by other restic processes.
+
+ EXIT STATUS
+ ===========
+
+ Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {

@@ -13,6 +13,11 @@ var versionCmd = &cobra.Command{
Long: `
The "version" command prints detailed information about the build environment
and the version of this software.
+
+ EXIT STATUS
+ ===========
+
+ Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
@@ -88,6 +88,18 @@ func rejectByPattern(patterns []string) RejectByNameFunc {
}
}

+ // Same as `rejectByPattern` but case insensitive.
+ func rejectByInsensitivePattern(patterns []string) RejectByNameFunc {
+ for index, path := range patterns {
+ patterns[index] = strings.ToLower(path)
+ }
+
+ rejFunc := rejectByPattern(patterns)
+ return func(item string) bool {
+ return rejFunc(strings.ToLower(item))
+ }
+ }
+
// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path
// should be excluded. The RejectByNameFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by

@@ -36,6 +36,33 @@ func TestRejectByPattern(t *testing.T) {
}
}

+ func TestRejectByInsensitivePattern(t *testing.T) {
+ var tests = []struct {
+ filename string
+ reject bool
+ }{
+ {filename: "/home/user/foo.GO", reject: true},
+ {filename: "/home/user/foo.c", reject: false},
+ {filename: "/home/user/foobar", reject: false},
+ {filename: "/home/user/FOObar/x", reject: true},
+ {filename: "/home/user/README", reject: false},
+ {filename: "/home/user/readme.md", reject: true},
+ }
+
+ patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
+
+ for _, tc := range tests {
+ t.Run("", func(t *testing.T) {
+ reject := rejectByInsensitivePattern(patterns)
+ res := reject(tc.filename)
+ if res != tc.reject {
+ t.Fatalf("wrong result for filename %v: want %v, got %v",
+ tc.filename, tc.reject, res)
+ }
+ })
+ }
+ }

func TestIsExcludedByFile(t *testing.T) {
const (
tagFilename = "CACHEDIR.TAG"
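rejectByInsensitivePattern lower-cases the pattern list once, then wraps the existing case-sensitive rejecter so each item is lower-cased before matching. A self-contained sketch of the same wrapper, with filepath.Match on the base name standing in for restic's pattern matcher:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    type rejectFunc func(item string) bool

    // rejectByPattern is a stand-in for the case-sensitive rejecter above.
    func rejectByPattern(patterns []string) rejectFunc {
        return func(item string) bool {
            for _, p := range patterns {
                if ok, _ := filepath.Match(p, filepath.Base(item)); ok {
                    return true
                }
            }
            return false
        }
    }

    // rejectByInsensitivePattern lower-cases patterns and items before delegating.
    func rejectByInsensitivePattern(patterns []string) rejectFunc {
        lowered := make([]string, len(patterns))
        for i, p := range patterns {
            lowered[i] = strings.ToLower(p)
        }
        rej := rejectByPattern(lowered)
        return func(item string) bool {
            return rej(strings.ToLower(item))
        }
    }

    func main() {
        reject := rejectByInsensitivePattern([]string{"*.go", "README.md"})
        for _, f := range []string{"/home/user/foo.GO", "/home/user/foo.c", "/home/user/readme.md"} {
            fmt.Printf("%-22s rejected=%v\n", f, reject(f))
        }
    }

Unlike the function in the diff, this sketch copies the pattern list instead of lower-casing it in place, which avoids mutating the caller's slice.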
Some files were not shown because too many files have changed in this diff.