Compare commits
426 Commits
v0.5.0-rc. ... v0.7.0
Author | SHA1 | Date
---|---|---
*(426 commits, `27ea0623d7` … `9e0207e534`)*

.github/ISSUE_TEMPLATE.md (vendored, 13 lines changed)

@@ -1,6 +1,18 @@
<!--
NOTE: Not filling out the issue template needs a good reason, otherwise the
issue may be closed instantly! Please take the time to help us debug the
problem by collecting information, even if it seems irrelevant to you. Thanks!
-->

## Output of `restic version`


## How did you start restic exactly? (Include the complete command line)


## What backend/server/service did you use?


## Expected behavior


@@ -10,3 +22,4 @@
## Steps to reproduce the behavior


## Do you have any idea what may have caused this?

.gitignore (vendored, 1 line changed)

@@ -3,3 +3,4 @@
/restic
/.vagrant
/vendor/pkg
/doc/_build

.travis.yml (10 lines changed)

@@ -2,8 +2,8 @@ language: go
sudo: false

go:
  - 1.7.5
  - 1.8
  - 1.7.6
  - 1.8.3
  - tip

os:
@@ -17,14 +17,14 @@ env:
matrix:
  exclude:
    - os: osx
      go: 1.7.5
      go: 1.7.6
    - os: osx
      go: tip
    - os: linux
      go: 1.8
      go: 1.8.3
  include:
    - os: linux
      go: 1.8
      go: 1.8.3
      sudo: true
      env:
        RESTIC_TEST_FUSE=1

CHANGELOG.md (new file, 126 lines)

@@ -0,0 +1,126 @@
This file describes changes relevant to all users that are made in each
released version of restic from the perspective of the user.

Important Changes in 0.X.Y
==========================

 * New "swift" backend: A new backend for the OpenStack Swift cloud storage
   protocol has been added, https://wiki.openstack.org/wiki/Swift
   https://github.com/restic/restic/pull/975
   https://github.com/restic/restic/pull/648

 * New "b2" backend: A new backend for Backblaze B2 cloud storage
   service has been added, https://www.backblaze.com
   https://github.com/restic/restic/issues/512
   https://github.com/restic/restic/pull/978

 * Improved performance for the `find` command: Restic recognizes paths it has
   already checked for the files in question, so the number of backend requests
   is reduced a lot.
   https://github.com/restic/restic/issues/989
   https://github.com/restic/restic/pull/993

 * Improved performance for the fuse mount: Listing directories which contain
   large files now is significantly faster.
   https://github.com/restic/restic/pull/998

 * The default layout for the s3 backend is now `default` (instead of
   `s3legacy`). Also, there's a new `migrate` command to convert an existing
   repo, it can be run like this: `restic migrate s3_layout`
   https://github.com/restic/restic/issues/965
   https://github.com/restic/restic/pull/1004

 * The fuse mount now has two more directories: `tags` contains a subdir for
   each tag, which in turn contains only the snapshots that have this tag. The
   subdir `hosts` contains a subdir for each host that has a snapshot, and the
   subdir contains the snapshots for that host.
   https://github.com/restic/restic/issues/636
   https://github.com/restic/restic/pull/1050

Small changes
-------------

 * For the s3 backend we're back to using the high-level API of the s3 client
   library for uploading data; a few users reported dropped connections (which
   the library will automatically retry now).
   https://github.com/restic/restic/issues/1013
   https://github.com/restic/restic/issues/1023
   https://github.com/restic/restic/pull/1025

 * The `prune` command has been improved and will now remove invalid pack
   files, for example files that have not been uploaded completely because a
   backup was interrupted.
   https://github.com/restic/restic/issues/1029
   https://github.com/restic/restic/pull/1036

 * restic now tries to detect when an invalid/unknown backend is used and
   returns an error message.
   https://github.com/restic/restic/issues/1021
   https://github.com/restic/restic/pull/1070

Important Changes in 0.6.1
==========================

This is mostly a bugfix release and only contains small changes:

 * We've fixed a bug where `rebuild-index` would corrupt the index when used
   with the s3 backend together with the `default` layout. This is not the
   default setting.

 * Backends based on HTTP now allow several idle connections in parallel. This
   is especially important for the REST backend, which (when used with a local
   server) may create a lot of connections and exhaust available ports quickly.
   https://github.com/restic/restic/issues/985
   https://github.com/restic/restic/pull/986

 * Regular status report: We've removed the status report that was printed
   every 10 seconds when restic is run non-interactively. You can still force
   reporting the current status by sending a `USR1` signal to the process.
   https://github.com/restic/restic/pull/974

 * The `build.go` now strips the temporary directory used for compilation from
   the binary. This is the first step in enabling reproducible builds.
   https://github.com/restic/restic/pull/981

Important Changes in 0.6.0
==========================

Consistent forget policy
------------------------

The `forget` command was corrected to be more consistent in which snapshots are
to be forgotten. It is possible that the new code removes more snapshots than
before, so please review what would be deleted by using the `--dry-run` option.

https://github.com/restic/restic/pull/957
https://github.com/restic/restic/issues/953

Unified repository layout
-------------------------

Up to now the s3 backend used a special repository layout. We've decided to
unify the repository layout and implemented the default layout also for the s3
backend. For creating a new repository on s3 with the default layout, use
`restic -o s3.layout=default init`. For further commands the option is not
necessary any more, restic will automatically detect the correct layout to use.
A future version will switch to the default layout for new repositories.

https://github.com/restic/restic/pull/966
https://github.com/restic/restic/issues/965

Memory and time improvements for the s3 backend
-----------------------------------------------

We've updated the library used for accessing s3, switched to using a lower
level API and added caching for some requests. This led to a decrease in
memory usage and a great speedup. In addition, we added benchmark functions for
all backends, so we can track improvements over time. The Continuous
Integration test service we're using (Travis) now runs the s3 backend tests not
only against a Minio server, but also against the Amazon s3 live service, so we
should be notified of any regressions much sooner.

https://github.com/restic/restic/pull/962
https://github.com/restic/restic/pull/960
https://github.com/restic/restic/pull/946
https://github.com/restic/restic/pull/938
https://github.com/restic/restic/pull/883

@@ -137,7 +137,13 @@ down to the following steps:
   commits to the branch you created for the pull request, they will be
   automatically added to the pull request.

7. Once your code looks good and passes all the tests, we'll merge it. Thanks
7. If your pull request changes anything that users should be aware of (a
   bugfix, a new feature, ...) please add an entry to the file
   ['CHANGELOG.md'](CHANGELOG.md). It will be used in the announcement of the
   next stable release. While writing, ask yourself: If I were the user, what
   would I need to be aware of with this change?

8. Once your code looks good and passes all the tests, we'll merge it. Thanks
   a lot for your contribution!

Please provide the patches for each bug or feature in a separate branch and

@@ -18,7 +18,7 @@

FROM ubuntu:14.04

ARG GOVERSION=1.7.5
ARG GOVERSION=1.8.3
ARG GOARCH=amd64

# install dependencies

README.md (deleted, 95 lines)

@@ -1,95 +0,0 @@
[](https://restic.readthedocs.io/en/latest/?badge=latest)
[](https://travis-ci.org/restic/restic)
[](https://ci.appveyor.com/project/fd0/restic/branch/master)
[](http://goreportcard.com/report/github.com/restic/restic)


Introduction
============

restic is a backup program that is fast, efficient and secure. Detailed
information can be found in [the documentation](doc/index.md) and [the user
manual](doc/Manual.md). The [design document](doc/Design.md) lists the
technical background and gives detailed information about the structure of the
repository and the data saved therein. The file [FAQ.md](doc/FAQ.md) lists the
most frequently asked questions.

The latest documentation can be viewed online at
<https://restic.readthedocs.io/en/latest>. On the bottom left corner there is
a menu that allows switching to the documentation and user manual for the
latest released version.

News
====

You can follow the restic project on Twitter
[@resticbackup](https://twitter.com/resticbackup) or by subscribing to the
[development blog](https://restic.github.io/blog/).

Install restic
==============

You can download the latest pre-compiled binary from the [restic release page](https://github.com/restic/restic/releases/latest).

Build restic
============

Install Go/Golang (at least version 1.7), then run `go run build.go`,
afterwards you'll find the binary in the current directory:

    $ go run build.go

    $ ./restic --help
    Usage:
      restic [OPTIONS] <command>
      [...]

More documentation can be found in the [user manual](doc/Manual.md).

At the moment, the only tested compiler for restic is the official Go compiler.
Building restic with gccgo may work, but is not supported.

Contribute and Documentation
============================

Contributions are welcome! Please **open an issue first** (or add a comment to
an existing issue) if you plan to work on any code or add a new feature. This
way, duplicate work is prevented and we can discuss your ideas and design
first.

More information and a description of the development environment can be found
in [`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
restic and the data structures stored on the back end is contained in
[`doc/Design.md`](doc/Design.md).

If you'd like to start contributing to restic, but don't know exactly what to
do, have a look at this great article by Dave Cheney:
[Suggestions for contributing to an Open Source project](http://dave.cheney.net/2016/03/12/suggestions-for-contributing-to-an-open-source-project)
A few issues have been tagged with the label `help wanted`, you can start
looking at those: https://github.com/restic/restic/labels/help%20wanted

Contact
=======

If you discover a bug, find something surprising or if you would like to
discuss or ask something, please [open a github issue](https://github.com/restic/restic/issues/new).
If you would like to chat about restic, there is also the IRC channel #restic
on irc.freenode.net.

**Important**: If you discover something that you believe to be a possible critical
security problem, please do *not* open a GitHub issue but send an email directly to
alexander@bumpern.de. If possible, please encrypt your email using the following PGP key
([0x91A6868BD3F7A907](https://pgp.mit.edu/pks/lookup?op=get&search=0xCF8F18F2844575973F79D4E191A6868BD3F7A907)):

```
pub   4096R/91A6868BD3F7A907 2014-11-01
      Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
uid                          Alexander Neumann <alexander@bumpern.de>
sub   4096R/D5FC2ACF4043FDF1 2014-11-01
```

License
=======

Restic is licensed under "BSD 2-Clause License". You can find the complete text
in the file `LICENSE`.

README.rst (new file, 101 lines)

@@ -0,0 +1,101 @@
|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks|

Introduction
------------

restic is a backup program that is fast, efficient and secure.

For detailed usage and installation instructions check out the `documentation <https://restic.readthedocs.io/en/latest>`__.

Quick start
-----------

Once you've `installed
<https://restic.readthedocs.io/en/latest/installation.html>`__ restic, start
off with creating a repository for your backups:

.. code-block:: console

    $ restic init --repo /tmp/backup
    enter password for new backend:
    enter password again:
    created restic backend 085b3c76b9 at /tmp/backup
    Please note that knowledge of your password is required to access the repository.
    Losing your password means that your data is irrecoverably lost.

and add some data:

.. code-block:: console

    $ restic -r /tmp/backup backup ~/work
    enter password for repository:
    scan [/home/user/work]
    scanned 764 directories, 1816 files in 0:00
    [0:29] 100.00%  54.732 MiB/s  1.582 GiB / 1.582 GiB  2580 / 2580 items  0 errors  ETA 0:00
    duration: 0:29, 54.47MiB/s
    snapshot 40dc1520 saved

For more options check out the `manual guide <https://restic.readthedocs.io/en/latest/manual.html>`__.

Design Principles
-----------------

Restic is a program that does backups right and was designed with the
following principles in mind:

-  **Easy:** Doing backups should be a frictionless process, otherwise
   you might be tempted to skip it. Restic should be easy to configure
   and use, so that, in the event of a data loss, you can just restore
   it. Likewise, restoring data should not be complicated.

-  **Fast**: Backing up your data with restic should only be limited by
   your network or hard disk bandwidth so that you can backup your files
   every day. Nobody does backups if it takes too much time. Restoring
   backups should only transfer data that is needed for the files that
   are to be restored, so that this process is also fast.

-  **Verifiable**: Much more important than backup is restore, so restic
   enables you to easily verify that all data can be restored.

-  **Secure**: Restic uses cryptography to guarantee confidentiality and
   integrity of your data. The location the backup data is stored is
   assumed not to be a trusted environment (e.g. a shared space where
   others like system administrators are able to access your backups).
   Restic is built to secure your data against such attackers.

-  **Efficient**: With the growth of data, additional snapshots should
   only take the storage of the actual increment. Even more, duplicate
   data should be de-duplicated before it is actually written to the
   storage back end to save precious backup space.

Reproducible Builds
-------------------

The binaries released with each restic version starting at 0.6.1 are
`reproducible <https://reproducible-builds.org/>`__, which means that you can
easily reproduce a byte identical version from the source code for that
release. Instructions on how to do that are contained in the
`builder repository <https://github.com/restic/builder>`__.

News
----

You can follow the restic project on Twitter `@resticbackup <https://twitter.com/resticbackup>`__ or by subscribing to
the `development blog <https://restic.github.io/blog/>`__.

License
-------

Restic is licensed under "BSD 2-Clause License". You can find the
complete text in ``LICENSE``.

.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest
   :target: https://restic.readthedocs.io/en/latest/?badge=latest
.. |Build Status| image:: https://travis-ci.org/restic/restic.svg?branch=master
   :target: https://travis-ci.org/restic/restic
.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true
   :target: https://ci.appveyor.com/project/fd0/restic/branch/master
.. |Report Card| image:: http://goreportcard.com/badge/github.com/restic/restic
   :target: http://goreportcard.com/report/github.com/restic/restic
.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg
   :target: https://saythanks.io/to/restic

@@ -17,8 +17,8 @@ init:

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.windows-amd64.msi
  - msiexec /i go1.8.windows-amd64.msi /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
  - msiexec /i go1.8.1.windows-amd64.msi /q
  - go version
  - go env
  - appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip

build.go (43 lines changed)

@@ -18,6 +18,7 @@ var (
    verbose    bool
    keepGopath bool
    runTests   bool
    enableCGO  bool
)

var config = struct {
@@ -49,7 +50,7 @@ func specialDir(name string) bool {
// excludePath returns true if the file should not be copied to the new GOPATH.
func excludePath(name string) bool {
    ext := path.Ext(name)
    if ext == ".go" || ext == ".s" {
    if ext == ".go" || ext == ".s" || ext == ".h" {
        return false
    }

@@ -125,6 +126,7 @@ func copyFile(dst, src string) error {
    if err != nil {
        return err
    }
    defer fsrc.Close()

    if err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
        fmt.Printf("MkdirAll(%v)\n", filepath.Dir(dst))
@@ -135,28 +137,17 @@ func copyFile(dst, src string) error {
    if err != nil {
        return err
    }
    defer fdst.Close()

    if _, err = io.Copy(fdst, fsrc); err != nil {
        return err
    }

    if err == nil {
        err = fsrc.Close()
    }

    if err == nil {
        err = fdst.Close()
    }

    _, err = io.Copy(fdst, fsrc)
    if err == nil {
        err = os.Chmod(dst, fi.Mode())
    }

    if err == nil {
        err = os.Chtimes(dst, fi.ModTime(), fi.ModTime())
    }

    return nil
    return err
}

// die prints the message with fmt.Fprintf() to stderr and exits with an error
@@ -175,6 +166,7 @@ func showUsage(output io.Writer) {
    fmt.Fprintf(output, " -k --keep-gopath do not remove the GOPATH after build\n")
    fmt.Fprintf(output, " -T --test run tests\n")
    fmt.Fprintf(output, " -o --output set output file name\n")
    fmt.Fprintf(output, " --enable-cgo use CGO to link against libc\n")
    fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
    fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
}

@@ -203,9 +195,16 @@ func cleanEnv() (env []string) {

// build runs "go build args..." with GOPATH set to gopath.
func build(cwd, goos, goarch, gopath string, args ...string) error {
    args = append([]string{"build"}, args...)
    cmd := exec.Command("go", args...)
    cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos, "CGO_ENABLED=0")
    a := []string{"build"}
    a = append(a, "-asmflags", fmt.Sprintf("-trimpath=%s", gopath))
    a = append(a, "-gcflags", fmt.Sprintf("-trimpath=%s", gopath))
    a = append(a, args...)
    cmd := exec.Command("go", a...)
    cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos)
    if !enableCGO {
        cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
    }

    cmd.Dir = cwd
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
@@ -291,6 +290,12 @@ func (cs Constants) LDFlags() string {
}

func main() {
    ver := runtime.Version()
    if strings.HasPrefix(ver, "go1") && ver < "go1.7" {
        fmt.Fprintf(os.Stderr, "Go version %s detected, restic requires at least Go 1.7\n", ver)
        os.Exit(1)
    }

    buildTags := []string{}

    skipNext := false
@@ -323,6 +328,8 @@ func main() {
        outputFilename = params[i+1]
    case "-T", "--test":
        runTests = true
    case "--enable-cgo":
        enableCGO = true
    case "--goos":
        skipNext = true
        targetGOOS = params[i+1]

@@ -1,64 +0,0 @@
#!/bin/bash

set -e

if [[ -z "$VERSION" ]]; then
    echo '$VERSION unset'
    exit 1
fi

dir=$(mktemp -d --tmpdir restic-release-XXXXXX)
echo "path is ${dir}"

for R in \
    darwin/386 \
    darwin/amd64 \
    freebsd/386 \
    freebsd/amd64 \
    freebsd/arm \
    linux/386 \
    linux/amd64 \
    linux/arm \
    linux/arm64 \
    openbsd/386 \
    openbsd/amd64 \
    windows/386 \
    windows/amd64 \
    ; do \

    OS=$(dirname $R)
    ARCH=$(basename $R)
    filename=restic_${VERSION}_${OS}_${ARCH}

    if [[ "$OS" == "windows" ]]; then
        filename="${filename}.exe"
    fi

    echo $filename

    go run build.go --goos $OS --goarch $ARCH --output ${filename}
    if [[ "$OS" == "windows" ]]; then
        zip ${filename%.exe}.zip ${filename}
        rm ${filename}
        mv ${filename%.exe}.zip ${dir}
    else
        bzip2 ${filename}
        mv ${filename}.bz2 ${dir}
    fi
done

echo "packing sources"
git archive --format=tar --prefix=restic-$VERSION/ v$VERSION | gzip -n > restic-$VERSION.tar.gz
mv restic-$VERSION.tar.gz ${dir}

echo "creating checksums"
pushd ${dir}
sha256sum restic_*.{zip,bz2} restic-$VERSION.tar.gz > SHA256SUMS
gpg --armor --detach-sign SHA256SUMS
popd

echo "creating source signature file"
gpg --armor --detach-sign ${dir}/restic-$VERSION.tar.gz

echo
echo "done, path is ${dir}"
538
doc/Design.md
@@ -1,538 +0,0 @@
|
||||
This document gives a high-level overview of the design and repository layout
|
||||
of the restic backup program.
|
||||
|
||||
Terminology
|
||||
===========
|
||||
|
||||
This section introduces terminology used in this document.
|
||||
|
||||
*Repository*: All data produced during a backup is sent to and stored in a
|
||||
repository in a structured form, for example in a file system hierarchy with
|
||||
several subdirectories. A repository implementation must be able to fulfill a
|
||||
number of operations, e.g. list the contents.
|
||||
|
||||
*Blob*: A Blob combines a number of data bytes with identifying information
|
||||
like the SHA-256 hash of the data and its length.
|
||||
|
||||
*Pack*: A Pack combines one or more Blobs, e.g. in a single file.
|
||||
|
||||
*Snapshot*: A Snapshot stands for the state of a file or directory that has
|
||||
been backed up at some point in time. The state here means the content and meta
|
||||
data like the name and modification time for the file or the directory and its
|
||||
contents.
|
||||
|
||||
*Storage ID*: A storage ID is the SHA-256 hash of the content stored in the
|
||||
repository. This ID is required in order to load the file from the repository.
|
||||
|
||||
Repository Format
|
||||
=================
|
||||
|
||||
All data is stored in a restic repository. A repository is able to store data
|
||||
of several different types, which can later be requested based on an ID. This
|
||||
so-called "storage ID" is the SHA-256 hash of the content of a file. All files
|
||||
in a repository are only written once and never modified afterwards. This
|
||||
allows accessing and even writing to the repository with multiple clients in
|
||||
parallel. Only the delete operation removes data from the repository.
|
||||
|
||||
At the time of writing, the only implemented repository type is based on
|
||||
directories and files. Such repositories can be accessed locally on the same
|
||||
system or via the integrated SFTP client (or any other storage back end).
|
||||
The directory layout is the same for both access methods.
|
||||
This repository type is described in the following section.
|
||||
|
||||
Repositories consist of several directories and a file called `config`. For
|
||||
all other files stored in the repository, the name for the file is the lower
|
||||
case hexadecimal representation of the storage ID, which is the SHA-256 hash of
|
||||
the file's contents. This allows for easy verification of files for accidental
|
||||
modifications, like disk read errors, by simply running the program `sha256sum`
|
||||
and comparing its output to the file name. If the prefix of a filename is
|
||||
unique amongst all the other files in the same directory, the prefix may be
|
||||
used instead of the complete filename.
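
As a rough illustration of this naming scheme (not restic's own code), the following Go sketch recomputes the SHA-256 of a repository file and checks it against the file name, accepting a unique prefix as described above:

```go
// Rough sketch of the integrity check described above: a repository file is
// named after the hex-encoded SHA-256 of its content, and a unique prefix of
// that name may be used instead of the full hash. Illustration only.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func verifyStorageID(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	want := hex.EncodeToString(sum[:])
	if !strings.HasPrefix(want, filepath.Base(path)) {
		return fmt.Errorf("%v: content hash is %v, name does not match", path, want)
	}
	return nil
}

func main() {
	for _, p := range os.Args[1:] {
		if err := verifyStorageID(p); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```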
|
||||
|
||||
Apart from the files stored within the `keys` directory, all files are encrypted
|
||||
with AES-256 in counter mode (CTR). The integrity of the encrypted data is
|
||||
secured by a Poly1305-AES message authentication code (sometimes also referred
|
||||
to as a "signature").
|
||||
|
||||
In the first 16 bytes of each encrypted file the initialisation vector (IV) is
|
||||
stored. It is followed by the encrypted data and completed by the 16 byte
|
||||
MAC. The format is: `IV || CIPHERTEXT || MAC`. The complete encryption
|
||||
overhead is 32 bytes. For each file, a new random IV is selected.
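
A minimal Go sketch of this layout may help; the 16-byte IV and 16-byte MAC sizes come from the text above, while the helper itself is hypothetical and not part of restic:

```go
// Illustrative helper for the `IV || CIPHERTEXT || MAC` layout described
// above; the 16-byte IV and 16-byte Poly1305 MAC give the fixed 32-byte
// overhead per encrypted file.
package layout

import "errors"

func splitEncrypted(buf []byte) (iv, ciphertext, mac []byte, err error) {
	const ivSize, macSize = 16, 16
	if len(buf) < ivSize+macSize {
		return nil, nil, nil, errors.New("file too short for IV and MAC")
	}
	iv = buf[:ivSize]
	mac = buf[len(buf)-macSize:]
	ciphertext = buf[ivSize : len(buf)-macSize]
	return iv, ciphertext, mac, nil
}
```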
|
||||
|
||||
The file `config` is encrypted this way and contains a JSON document like the
|
||||
following:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
|
||||
"chunker_polynomial": "25b468838dcb75"
|
||||
}
|
||||
```
|
||||
|
||||
After decryption, restic first checks that the version field contains a version
|
||||
number that it understands, otherwise it aborts. At the moment, the version is
|
||||
expected to be 1. The field `id` holds a unique ID which consists of 32
|
||||
random bytes, encoded in hexadecimal. This uniquely identifies the repository,
|
||||
regardless if it is accessed via SFTP or locally. The field
|
||||
`chunker_polynomial` contains a parameter that is used for splitting large
|
||||
files into smaller chunks (see below).
|
||||
|
||||
The basic layout of a sample restic repository is shown here:
|
||||
|
||||
/tmp/restic-repo
|
||||
├── config
|
||||
├── data
|
||||
│ ├── 21
|
||||
│ │ └── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1
|
||||
│ ├── 32
|
||||
│ │ └── 32ea976bc30771cebad8285cd99120ac8786f9ffd42141d452458089985043a5
|
||||
│ ├── 59
|
||||
│ │ └── 59fe4bcde59bd6222eba87795e35a90d82cd2f138a27b6835032b7b58173a426
|
||||
│ ├── 73
|
||||
│ │ └── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c
|
||||
│ [...]
|
||||
├── index
|
||||
│ ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d
|
||||
│ └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd
|
||||
├── keys
|
||||
│ └── b02de829beeb3c01a63e6b25cbd421a98fef144f03b9a02e46eff9e2ca3f0bd7
|
||||
├── locks
|
||||
├── snapshots
|
||||
│ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
||||
└── tmp
|
||||
|
||||
A repository can be initialized with the `restic init` command, e.g.:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo init
|
||||
```
|
||||
|
||||
Pack Format
|
||||
-----------
|
||||
|
||||
All files in the repository except Key and Pack files just contain raw data,
|
||||
stored as `IV || Ciphertext || MAC`. Pack files may contain one or more Blobs
|
||||
of data.
|
||||
|
||||
A Pack's structure is as follows:
|
||||
|
||||
EncryptedBlob1 || ... || EncryptedBlobN || EncryptedHeader || Header_Length
|
||||
|
||||
At the end of the Pack file is a header, which describes the content. The
|
||||
header is encrypted and authenticated. `Header_Length` is the length of the
|
||||
encrypted header encoded as a four byte integer in little-endian encoding.
|
||||
Placing the header at the end of a file allows writing the blobs in a
|
||||
continuous stream as soon as they are read during the backup phase. This
|
||||
reduces code complexity and avoids having to re-write a file once the pack is
|
||||
complete and the content and length of the header is known.
|
||||
|
||||
All the blobs (`EncryptedBlob1`, `EncryptedBlobN` etc.) are authenticated and
|
||||
encrypted independently. This enables repository reorganisation without having
|
||||
to touch the encrypted Blobs. In addition it also allows efficient indexing,
|
||||
for only the header needs to be read in order to find out which Blobs are
|
||||
contained in the Pack. Since the header is authenticated, authenticity of the
|
||||
header can be checked without having to read the complete Pack.
|
||||
|
||||
After decryption, a Pack's header consists of the following elements:
|
||||
|
||||
Type_Blob1 || Length(EncryptedBlob1) || Hash(Plaintext_Blob1) ||
|
||||
[...]
|
||||
Type_BlobN || Length(EncryptedBlobN) || Hash(Plaintext_Blobn) ||
|
||||
|
||||
This is enough to calculate the offsets for all the Blobs in the Pack. Length
|
||||
is the length of a Blob as a four byte integer in little-endian format. The
|
||||
type field is a one byte field and labels the content of a blob according to
|
||||
the following table:
|
||||
|
||||
Type | Meaning
|
||||
-----|---------
|
||||
0 | data
|
||||
1 | tree
|
||||
|
||||
All other types are invalid, more types may be added in the future.
|
||||
|
||||
For reconstructing the index or parsing a pack without an index, first the last
|
||||
four bytes must be read in order to find the length of the header. Afterwards,
|
||||
the header can be read and parsed, which yields all plaintext hashes, types,
|
||||
offsets and lengths of all included blobs.
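
As a hedged illustration (not restic's implementation), this Go sketch shows how the header could be located from the trailing four-byte, little-endian length field:

```go
// Sketch of locating a Pack's encrypted header: the last four bytes of the
// file hold the header length as a little-endian uint32, and the header
// itself ends directly before those four bytes.
package pack

import (
	"encoding/binary"
	"fmt"
	"io"
)

func headerBounds(r io.ReaderAt, packSize int64) (offset, length int64, err error) {
	var lenBuf [4]byte
	if _, err := r.ReadAt(lenBuf[:], packSize-4); err != nil {
		return 0, 0, err
	}
	length = int64(binary.LittleEndian.Uint32(lenBuf[:]))
	offset = packSize - 4 - length
	if offset < 0 {
		return 0, 0, fmt.Errorf("invalid header length %d", length)
	}
	return offset, length, nil
}
```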
|
||||
|
||||
Indexing
|
||||
--------
|
||||
|
||||
Index files contain information about Data and Tree Blobs and the Packs they
|
||||
are contained in and store this information in the repository. When the local
|
||||
cached index is not accessible any more, the index files can be downloaded and
|
||||
used to reconstruct the index. The files are encrypted and authenticated like
|
||||
Data and Tree Blobs, so the outer structure is `IV || Ciphertext || MAC` again.
|
||||
The plaintext consists of a JSON document like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"supersedes": [
|
||||
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
|
||||
],
|
||||
"packs": [
|
||||
{
|
||||
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
|
||||
"blobs": [
|
||||
{
|
||||
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
|
||||
"type": "data",
|
||||
"offset": 0,
|
||||
"length": 25
|
||||
},{
|
||||
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
|
||||
"type": "tree",
|
||||
"offset": 38,
|
||||
"length": 100
|
||||
},
|
||||
{
|
||||
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
|
||||
"type": "data",
|
||||
"offset": 150,
|
||||
"length": 123
|
||||
}
|
||||
]
|
||||
}, [...]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This JSON document lists Packs and the blobs contained therein. In this
|
||||
example, the Pack `73d04e61` contains two data Blobs and one Tree blob, the
|
||||
plaintext hashes are listed afterwards.
|
||||
|
||||
The field `supersedes` lists the storage IDs of index files that have been
|
||||
replaced with the current index file. This happens when index files are
|
||||
repacked, for example when old snapshots are removed and Packs are recombined.
|
||||
|
||||
There may be an arbitrary number of index files, containing information on
|
||||
non-disjoint sets of Packs. The number of packs described in a single file is
|
||||
chosen so that the file size is kept below 8 MiB.
|
||||
|
||||
Keys, Encryption and MAC
|
||||
------------------------
|
||||
|
||||
All data stored by restic in the repository is encrypted with AES-256 in
|
||||
counter mode and authenticated using Poly1305-AES. For encrypting new data first
|
||||
16 bytes are read from a cryptographically secure pseudorandom number generator
|
||||
as a random nonce. This is used both as the IV for counter mode and the nonce
|
||||
for Poly1305. This operation needs three keys: A 32 byte for AES-256 for
|
||||
encryption, a 16 byte AES key and a 16 byte key for Poly1305. For details see
|
||||
the original paper [The Poly1305-AES message-authentication
|
||||
code](http://cr.yp.to/mac/poly1305-20050329.pdf) by Dan Bernstein.
|
||||
The data is then encrypted with AES-256 and afterwards a message authentication
|
||||
code (MAC) is computed over the ciphertext, everything is then stored as
|
||||
IV || CIPHERTEXT || MAC.
|
||||
|
||||
The directory `keys` contains key files. These are simple JSON documents which
|
||||
contain all data that is needed to derive the repository's master encryption and
|
||||
message authentication keys from a user's password. The JSON document from the
|
||||
repository can be pretty-printed for example by using the Python module `json`
|
||||
(shortened to increase readability):
|
||||
|
||||
$ python -mjson.tool /tmp/restic-repo/keys/b02de82*
|
||||
{
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0"
|
||||
"kdf": "scrypt",
|
||||
"N": 65536,
|
||||
"r": 8,
|
||||
"p": 1,
|
||||
"created": "2015-01-02T18:10:13.48307196+01:00",
|
||||
"data": "tGwYeKoM0C4j4/9DFrVEmMGAldvEn/+iKC3te/QE/6ox/V4qz58FUOgMa0Bb1cIJ6asrypCx/Ti/pRXCPHLDkIJbNYd2ybC+fLhFIJVLCvkMS+trdywsUkglUbTbi+7+Ldsul5jpAj9vTZ25ajDc+4FKtWEcCWL5ICAOoTAxnPgT+Lh8ByGQBH6KbdWabqamLzTRWxePFoYuxa7yXgmj9A==",
|
||||
"salt": "uW4fEI1+IOzj7ED9mVor+yTSJFd68DGlGOeLgJELYsTU5ikhG/83/+jGd4KKAaQdSrsfzrdOhAMftTSih5Ux6w==",
|
||||
}
|
||||
|
||||
When the repository is opened by restic, the user is prompted for the
|
||||
repository password. This is then used with `scrypt`, a key derivation function
|
||||
(KDF), and the supplied parameters (`N`, `r`, `p` and `salt`) to derive 64 key
|
||||
bytes. The first 32 bytes are used as the encryption key (for AES-256) and the
|
||||
last 32 bytes are used as the message authentication key (for Poly1305-AES).
|
||||
These last 32 bytes are divided into a 16 byte AES key `k` followed by 16 bytes
|
||||
of secret key `r`. The key `r` is then masked for use with Poly1305 (see the
|
||||
paper for details).
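
A small Go sketch of this derivation, assuming the `scrypt` package from golang.org/x/crypto and omitting the Poly1305 masking step; it illustrates the split described above and is not restic's own code:

```go
// Sketch of the key derivation described above, using the scrypt parameters
// (N, r, p, salt) stored in the key file. The split into an AES-256 key and
// the Poly1305-AES keys k and r follows the text; the masking of r and the
// decryption of the `data` field are omitted.
package keyfile

import "golang.org/x/crypto/scrypt"

type userKeys struct {
	Encrypt []byte // 32 bytes, AES-256 encryption key
	MACk    []byte // 16 bytes, AES key k for Poly1305-AES
	MACr    []byte // 16 bytes, secret r (still to be masked for Poly1305)
}

func deriveUserKeys(password, salt []byte, N, r, p int) (*userKeys, error) {
	buf, err := scrypt.Key(password, salt, N, r, p, 64)
	if err != nil {
		return nil, err
	}
	return &userKeys{
		Encrypt: buf[:32],
		MACk:    buf[32:48],
		MACr:    buf[48:64],
	}, nil
}
```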
|
||||
|
||||
Those message authentication keys (`k` and `r`) are used to compute a MAC over
|
||||
the bytes contained in the JSON field `data` (after removing the Base64
|
||||
encoding and not including the last 32 byte). If the password is incorrect or
|
||||
the key file has been tampered with, the computed MAC will not match the last
|
||||
16 bytes of the data, and restic exits with an error. Otherwise, the data is
|
||||
decrypted with the encryption key derived from `scrypt`. This yields a JSON
|
||||
document which contains the master encryption and message authentication keys
|
||||
for this repository (encoded in Base64). The command `restic cat masterkey` can
|
||||
be used as follows to decrypt and pretty-print the master key:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat masterkey
|
||||
{
|
||||
"mac": {
|
||||
"k": "evFWd9wWlndL9jc501268g==",
|
||||
"r": "E9eEDnSJZgqwTOkDtOp+Dw=="
|
||||
},
|
||||
"encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
|
||||
}
|
||||
```
|
||||
|
||||
All data in the repository is encrypted and authenticated with these master keys.
|
||||
For encryption, the AES-256 algorithm in Counter mode is used. For message
|
||||
authentication, Poly1305-AES is used as described above.
|
||||
|
||||
A repository can have several different passwords, with a key file for each.
|
||||
This way, the password can be changed without having to re-encrypt all data.
|
||||
|
||||
Snapshots
|
||||
---------
|
||||
|
||||
A snapshot represents a directory with all files and sub-directories at a
|
||||
given point in time. For each backup that is made, a new snapshot is created. A
|
||||
snapshot is a JSON document that is stored in an encrypted file below the
|
||||
directory `snapshots` in the repository. The filename is the storage ID. This
|
||||
string is unique and used within restic to uniquely identify a snapshot.
|
||||
|
||||
The command `restic cat snapshot` can be used as follows to decrypt and
|
||||
pretty-print the contents of a snapshot file:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat snapshot 251c2e58
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-01-02T18:10:50.895208559+01:00",
|
||||
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
|
||||
"dir": "/tmp/testdata",
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"tags": [
|
||||
"NL"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Here it can be seen that this snapshot represents the contents of the directory
|
||||
`/tmp/testdata`. The most important field is `tree`. When the meta data (e.g.
|
||||
the tags) of a snapshot change, the snapshot needs to be re-encrypted and saved.
|
||||
This will change the storage ID, so in order to relate these seemingly
|
||||
different snapshots, a field `original` is introduced which contains the ID of
|
||||
the original snapshot, e.g. after adding the tag `DE` to the snapshot above it
|
||||
becomes:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat snapshot 22a5af1b
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-01-02T18:10:50.895208559+01:00",
|
||||
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
|
||||
"dir": "/tmp/testdata",
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"tags": [
|
||||
"NL",
|
||||
"DE"
|
||||
],
|
||||
"original": "251c2e5841355f743f9d4ffd3260bee765acee40a6229857e32b60446991b837"
|
||||
}
|
||||
```
|
||||
|
||||
Once introduced, the `original` field is not modified when the snapshot's meta
|
||||
data is changed again.
|
||||
|
||||
All content within a restic repository is referenced according to its SHA-256
|
||||
hash. Before saving, each file is split into variable sized Blobs of data. The
|
||||
SHA-256 hashes of all Blobs are saved in an ordered list which then represents
|
||||
the content of the file.
|
||||
|
||||
In order to relate these plaintext hashes to the actual location within a Pack
|
||||
file , an index is used. If the index is not available, the header of all data
|
||||
Blobs can be read.
|
||||
|
||||
Trees and Data
|
||||
--------------
|
||||
|
||||
A snapshot references a tree by the SHA-256 hash of the JSON string
|
||||
representation of its contents. Trees and data are saved in pack files in a
|
||||
subdirectory of the directory `data`.
|
||||
|
||||
The command `restic cat blob` can be used to inspect the tree referenced above
|
||||
(piping the output of the command to `jq .` so that the JSON is indented):
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat blob b8138ab08a4722596ac89c917827358da4672eac68e3c03a8115b88dbf4bfb59 | jq .
|
||||
enter password for repository:
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"name": "testdata",
|
||||
"type": "dir",
|
||||
"mode": 493,
|
||||
"mtime": "2014-12-22T14:47:59.912418701+01:00",
|
||||
"atime": "2014-12-06T17:49:21.748468803+01:00",
|
||||
"ctime": "2014-12-22T14:47:59.912418701+01:00",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"user": "fd0",
|
||||
"inode": 409704562,
|
||||
"content": null,
|
||||
"subtree": "b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
A tree contains a list of entries (in the field `nodes`) which contain meta
|
||||
data like a name and timestamps. When the entry references a directory, the
|
||||
field `subtree` contains the plain text ID of another tree object.
|
||||
|
||||
When the command `restic cat blob` is used, the plaintext ID is needed to print
|
||||
a tree. The tree referenced above can be dumped as follows:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat blob 8b238c8811cc362693e91a857460c78d3acf7d9edb2f111048691976803cf16e
|
||||
enter password for repository:
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"name": "testfile",
|
||||
"type": "file",
|
||||
"mode": 420,
|
||||
"mtime": "2014-12-06T17:50:23.34513538+01:00",
|
||||
"atime": "2014-12-06T17:50:23.338468713+01:00",
|
||||
"ctime": "2014-12-06T17:50:23.34513538+01:00",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"user": "fd0",
|
||||
"inode": 416863351,
|
||||
"size": 1234,
|
||||
"links": 1,
|
||||
"content": [
|
||||
"50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d"
|
||||
]
|
||||
},
|
||||
[...]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This tree contains a file entry. This time, the `subtree` field is not present
|
||||
and the `content` field contains a list with one plain text SHA-256 hash.
|
||||
|
||||
The command `restic cat blob` can also be used to extract and decrypt data
|
||||
given a plaintext ID, e.g. for the data mentioned above:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
|
||||
enter password for repository:
|
||||
50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
|
||||
```
|
||||
|
||||
As can be seen from the output of the program `sha256sum`, the hash matches the
|
||||
plaintext hash from the map included in the tree above, so the correct data has
|
||||
been returned.
|
||||
|
||||
Locks
|
||||
-----
|
||||
|
||||
The restic repository structure is designed in a way that allows parallel
|
||||
access of multiple instance of restic and even parallel writes. However, there
|
||||
are some functions that work more efficient or even require exclusive access of
|
||||
the repository. In order to implement these functions, restic processes are
|
||||
required to create a lock on the repository before doing anything.
|
||||
|
||||
Locks come in two types: Exclusive and non-exclusive locks. At most one
|
||||
process can have an exclusive lock on the repository, and during that time
|
||||
there must not be any other locks (exclusive and non-exclusive). There may be
|
||||
multiple non-exclusive locks in parallel.
|
||||
|
||||
A lock is a file in the subdir `locks` whose filename is the storage ID of
|
||||
the contents. It is encrypted and authenticated the same way as other files
|
||||
in the repository and contains the following JSON structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"time": "2015-06-27T12:18:51.759239612+02:00",
|
||||
"exclusive": false,
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"pid": 13607,
|
||||
"uid": 1000,
|
||||
"gid": 100
|
||||
}
|
||||
```
|
||||
|
||||
The field `exclusive` defines the type of lock. When a new lock is to be
|
||||
created, restic checks all locks in the repository. When a lock is found, it
|
||||
is tested if the lock is stale, which is the case for locks with timestamps
|
||||
older than 30 minutes. If the lock was created on the same machine, even for
|
||||
younger locks it is tested whether the process is still alive by sending a
|
||||
signal to it. If that fails, restic assumes that the process is dead and
|
||||
considers the lock to be stale.
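
The following Go sketch illustrates these staleness rules under the stated assumptions (30-minute timeout, signal-0 liveness probe on the local host); it is not restic's implementation:

```go
// Sketch of the staleness rules described above: a lock is stale when its
// timestamp is older than 30 minutes, or when it was created on this host
// and its process no longer exists. The signal-0 probe is Unix-specific.
package locks

import (
	"os"
	"syscall"
	"time"
)

type Lock struct {
	Time     time.Time
	Hostname string
	PID      int
}

func stale(l Lock, localHostname string) bool {
	if time.Since(l.Time) > 30*time.Minute {
		return true
	}
	if l.Hostname != localHostname {
		return false // cannot probe a process on another machine
	}
	proc, err := os.FindProcess(l.PID)
	if err != nil {
		return true
	}
	// Signal 0 only checks for existence; an error means the process is gone.
	return proc.Signal(syscall.Signal(0)) != nil
}
```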
|
||||
|
||||
When a new lock is to be created and no other conflicting locks are
|
||||
detected, restic creates a new lock, waits, and checks if other locks
|
||||
appeared in the repository. Depending on the type of the other locks and the
|
||||
lock to be created, restic either continues or fails.
|
||||
|
||||
Backups and Deduplication
|
||||
=========================
|
||||
|
||||
For creating a backup, restic scans the source directory for all files,
|
||||
sub-directories and other entries. The data from each file is split into
|
||||
variable length Blobs cut at offsets defined by a sliding window of 64 byte.
|
||||
The implementation uses Rabin Fingerprints for implementing this Content
|
||||
Defined Chunking (CDC). An irreducible polynomial is selected at random and
|
||||
saved in the file `config` when a repository is initialized, so that watermark
|
||||
attacks are much harder.
|
||||
|
||||
Files smaller than 512 KiB are not split, Blobs are of 512 KiB to 8 MiB in
|
||||
size. The implementation aims for 1 MiB Blob size on average.
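
A very rough Go sketch of the resulting boundary policy, with the Rabin fingerprint abstracted behind a predicate since the actual rolling-hash computation is not shown here:

```go
// Rough sketch of the chunk-boundary policy described above: chunks are never
// smaller than 512 KiB or larger than 8 MiB, and in between a cut is made
// wherever the rolling fingerprint of the last 64 bytes satisfies a
// per-repository condition. Illustration only, not restic's chunker.
package chunker

const (
	minSize    = 512 * 1024      // never cut below this size
	maxSize    = 8 * 1024 * 1024 // always cut at this size
	windowSize = 64              // width of the sliding window in bytes
)

// cutHere reports whether a boundary should be placed after length bytes,
// given a predicate over the rolling fingerprint of the current window.
func cutHere(length int, fingerprintMatches func() bool) bool {
	switch {
	case length < minSize:
		return false
	case length >= maxSize:
		return true
	default:
		return fingerprintMatches()
	}
}
```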
|
||||
|
||||
For modified files, only modified Blobs have to be saved in a subsequent
|
||||
backup. This even works if bytes are inserted or removed at arbitrary positions
|
||||
within the file.
|
||||
|
||||
Threat Model
|
||||
============
|
||||
|
||||
The design goals for restic include being able to securely store backups in a
|
||||
location that is not completely trusted, e.g. a shared system where others can
|
||||
potentially access the files or (in the case of the system administrator) even
|
||||
modify or delete them.
|
||||
|
||||
General assumptions:
|
||||
|
||||
* The host system a backup is created on is trusted. This is the most basic
|
||||
requirement, and essential for creating trustworthy backups.
|
||||
|
||||
The restic backup program guarantees the following:
|
||||
|
||||
* Accessing the unencrypted content of stored files and metadata should not
|
||||
be possible without a password for the repository. Everything except the
|
||||
metadata included for informational purposes in the key files is encrypted and
|
||||
authenticated.
|
||||
|
||||
* Modifications (intentional or unintentional) can be detected automatically
|
||||
on several layers:
|
||||
|
||||
1. For all accesses of data stored in the repository it is checked whether
|
||||
the cryptographic hash of the contents matches the storage ID (the
|
||||
file's name). This way, modifications (bad RAM, broken hard disk) can be
|
||||
detected easily; a small illustrative sketch of this check follows this list.
|
||||
|
||||
2. Before decrypting any data, the MAC on the encrypted data is
|
||||
checked. If there has been a modification, the MAC check will
|
||||
fail. This step happens even before the data is decrypted, so data that
|
||||
has been tampered with is not decrypted at all.
|
||||
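
As referenced in item 1, here is a minimal sketch of the first check: re-hash a repository file and compare the result with its name (the storage ID). The command-line interface is purely illustrative.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// verify re-computes the SHA-256 of a repository file and compares it with
// the file's name, which is the storage ID.
func verify(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	sum := hex.EncodeToString(h.Sum(nil))
	if sum != filepath.Base(path) {
		return fmt.Errorf("%s: content hash %s does not match the storage ID", path, sum)
	}
	return nil
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: verify <repository file>")
		os.Exit(1)
	}
	if err := verify(os.Args[1]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("ok")
}
```
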
|
||||
However, the restic backup program is not designed to protect against attackers
|
||||
deleting files at the storage location. There is nothing that can be done about
|
||||
this. If this needs to be guaranteed, get a secure location without any access
|
||||
from third parties. If you assume that attackers have write access to your
|
||||
files at the storage location, attackers are able to figure out (e.g. based on
|
||||
the timestamps of the stored files) which files belong to what snapshot. When
|
||||
only these files are deleted, that particular snapshot vanishes, and all
|
||||
snapshots that depend on data first added in that snapshot can no longer be
|
||||
restored completely. Restic is not designed to detect this attack.
|
1
doc/Design.md
Symbolic link
@@ -0,0 +1 @@
|
||||
design.rst
|
20
doc/Makefile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
SPHINXPROJ = restic
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
837
doc/Manual.md
@@ -1,837 +0,0 @@
|
||||
Thanks for using restic. This document will give you an overview of the basic
|
||||
functionality provided by restic.
|
||||
|
||||
# Installing restic
|
||||
|
||||
## from pre-compiled binary
|
||||
|
||||
You can download the latest pre-compiled binary from the [restic release page](https://github.com/restic/restic/releases/latest).
|
||||
|
||||
## Mac OS X
|
||||
|
||||
If you are using Mac OS X, you can install restic using the
|
||||
[homebrew](http://brew.sh/) package manager:
|
||||
|
||||
```console
|
||||
$ brew tap restic/restic
|
||||
$ brew install restic
|
||||
```
|
||||
|
||||
## archlinux
|
||||
|
||||
On archlinux, there is a package called `restic-git` which can be installed from AUR, e.g. with `pacaur`:
|
||||
|
||||
```console
|
||||
$ pacaur -S restic-git
|
||||
```
|
||||
|
||||
# Building restic
|
||||
|
||||
restic is written in the Go programming language and you need at least Go version 1.7.
|
||||
Building restic may also work with older versions of Go, but that's not supported.
|
||||
See the [Getting started](https://golang.org/doc/install) guide of the Go project for
|
||||
instructions on how to install Go.
|
||||
|
||||
In order to build restic from source, execute the following steps:
|
||||
|
||||
```console
|
||||
$ git clone https://github.com/restic/restic
|
||||
[...]
|
||||
|
||||
$ cd restic
|
||||
|
||||
$ go run build.go
|
||||
```
|
||||
|
||||
At the moment, the only tested compiler for restic is the official Go compiler.
|
||||
Building restic with gccgo may work, but is not supported.
|
||||
|
||||
# Usage help
|
||||
|
||||
Usage help is available:
|
||||
|
||||
```console
|
||||
$ ./restic --help
|
||||
restic is a backup program which allows saving multiple revisions of files and
|
||||
directories in an encrypted repository stored on different backends.
|
||||
|
||||
Usage:
|
||||
restic [command]
|
||||
|
||||
Available Commands:
|
||||
backup create a new backup of files and/or directories
|
||||
cat print internal objects to stdout
|
||||
check check the repository for errors
|
||||
find find a file or directory
|
||||
forget forget removes snapshots from the repository
|
||||
init initialize a new repository
|
||||
key manage keys (passwords)
|
||||
list list items in the repository
|
||||
ls list files in a snapshot
|
||||
mount mount the repository
|
||||
prune remove unneeded data from the repository
|
||||
rebuild-index build a new index file
|
||||
restore extract the data from a snapshot
|
||||
snapshots list all snapshots
|
||||
tag modifies tags on snapshots
|
||||
unlock remove locks other processes created
|
||||
version Print version information
|
||||
|
||||
Flags:
|
||||
--json set output mode to JSON for commands that support it
|
||||
--no-lock do not lock the repo, this allows some operations on read-only repos
|
||||
-p, --password-file string read the repository password from a file
|
||||
-q, --quiet do not output comprehensive progress report
|
||||
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
|
||||
|
||||
Use "restic [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
Similar to programs such as `git`, restic has a number of sub-commands. You can
|
||||
see these commands in the listing above. Each sub-command may have its own
|
||||
command-line options, and there is a help option for each command which lists
|
||||
them, e.g. for the `backup` command:
|
||||
|
||||
```console
|
||||
$ ./restic backup --help
|
||||
The "backup" command creates a new snapshot and saves the files and directories
|
||||
given as the arguments.
|
||||
|
||||
Usage:
|
||||
restic backup [flags] FILE/DIR [FILE/DIR] ...
|
||||
|
||||
Flags:
|
||||
-e, --exclude pattern exclude a pattern (can be specified multiple times)
|
||||
--exclude-file string read exclude patterns from a file
|
||||
--files-from string read the files to backup from file (can be combined with file args)
|
||||
-f, --force force re-reading the target files/directories. Overrides the "parent" flag
|
||||
-x, --one-file-system Exclude other file systems
|
||||
--parent string use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)
|
||||
--stdin read backup from stdin
|
||||
--stdin-filename string file name to use when reading from stdin
|
||||
--tag tag add a tag for the new snapshot (can be specified multiple times)
|
||||
|
||||
Global Flags:
|
||||
--json set output mode to JSON for commands that support it
|
||||
--no-lock do not lock the repo, this allows some operations on read-only repos
|
||||
-p, --password-file string read the repository password from a file
|
||||
-q, --quiet do not output comprehensive progress report
|
||||
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
|
||||
```
|
||||
|
||||
Subcommands that support showing progress information, such as `backup`, `check` and `prune`, will do so unless
|
||||
the quiet flag `-q` or `--quiet` is set. When running from a non-interactive console, progress reporting will
|
||||
be limited to once every 10 seconds to not fill your logs.
|
||||
|
||||
Additionally, on Unix systems, if `restic` receives a SIGUSR signal the current progress will be written to the
|
||||
standard output so you can check up on the status at will.
|
||||
|
||||
|
||||
# Initialize a repository
|
||||
|
||||
First, we need to create a "repository". This is the place where your backups
|
||||
will be saved.
|
||||
|
||||
In order to create a repository at `/tmp/backup`, run the following command and
|
||||
enter the same password twice:
|
||||
|
||||
```console
|
||||
$ restic init --repo /tmp/backup
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend 085b3c76b9 at /tmp/backup
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
```
|
||||
|
||||
Other backends like sftp and s3 are [described in a later section](#create-an-sftp-repository) of this document.
|
||||
|
||||
Remembering your password is important! If you lose it, you won't be able to
|
||||
access data stored in the repository.
|
||||
|
||||
For automated backups, restic accepts the repository location in the
|
||||
environment variable `RESTIC_REPOSITORY`. The password can be read from a file
|
||||
(via the option `--password-file`) or the environment variable
|
||||
`RESTIC_PASSWORD`.
|
||||
|
||||
## Password prompt on Windows
|
||||
|
||||
At the moment, restic only supports the default Windows console interaction.
|
||||
If you use emulation environments like [MSYS2](https://msys2.github.io/) or
|
||||
[Cygwin](https://www.cygwin.com/), which use terminals like `Mintty` or `rxvt`,
|
||||
you may get a password error:
|
||||
|
||||
You can work around this by using a special tool called `winpty` (look
|
||||
[here](https://sourceforge.net/p/msys2/wiki/Porting/) and
|
||||
[here](https://github.com/rprichard/winpty) for detail information). On MSYS2,
|
||||
you can install `winpty` as follows:
|
||||
|
||||
```console
|
||||
$ pacman -S winpty
|
||||
$ winpty restic -r /tmp/backup init
|
||||
```
|
||||
|
||||
# Create a snapshot
|
||||
|
||||
Now we're ready to backup some data. The contents of a directory at a specific
|
||||
point in time is called a "snapshot" in restic. Run the following command and
|
||||
enter the repository password you chose above again:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup ~/work
|
||||
enter password for repository:
|
||||
scan [/home/user/work]
|
||||
scanned 764 directories, 1816 files in 0:00
|
||||
[0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
|
||||
duration: 0:29, 54.47MiB/s
|
||||
snapshot 40dc1520 saved
|
||||
```
|
||||
|
||||
As you can see, restic created a backup of the directory and was pretty fast!
|
||||
The specific snapshot just created is identified by a sequence of hexadecimal
|
||||
characters, `40dc1520` in this case.
|
||||
|
||||
If you run the command again, restic will create another snapshot of your data,
|
||||
but this time it's even faster. This is de-duplication at work!
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup ~/shared/work/web
|
||||
enter password for repository:
|
||||
using parent snapshot 40dc1520aa6a07b7b3ae561786770a01951245d2367241e71e9485f18ae8228c
|
||||
scan [/home/user/work]
|
||||
scanned 764 directories, 1816 files in 0:00
|
||||
[0:00] 100.00% 0B/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
|
||||
duration: 0:00, 6572.38MiB/s
|
||||
snapshot 79766175 saved
|
||||
```
|
||||
|
||||
You can even backup individual files in the same repository.
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup ~/work.txt
|
||||
scan [~/work.txt]
|
||||
scanned 0 directories, 1 files in 0:00
|
||||
[0:00] 100.00% 0B/s 220B / 220B 1 / 1 items 0 errors ETA 0:00
|
||||
duration: 0:00, 0.03MiB/s
|
||||
snapshot 31f7bd63 saved
|
||||
```
|
||||
|
||||
In fact, several hosts may use the same repository to backup directories and
|
||||
files, leading to greater de-duplication.
|
||||
|
||||
Please be aware that when you backup different directories (or the directories
|
||||
to be saved have a variable name component like a time/date), restic always
|
||||
needs to read all files and only afterwards can compute which parts of the
|
||||
files need to be saved. When you backup the same directory again (maybe with
|
||||
new or changed files) restic will find the old snapshot in the repo and by
|
||||
default only reads those files that are new or have been modified since the
|
||||
last snapshot. This is decided based on the modify date of the file in the
|
||||
file system.
|
||||
|
||||
You can exclude folders and files by specifying exclude-patterns.
|
||||
Either specify them with multiple `--exclude` options or with one `--exclude-file`.
|
||||
|
||||
```console
|
||||
$ cat exclude
|
||||
# exclude go-files
|
||||
*.go
|
||||
# exclude foo/x/y/z/bar foo/x/bar foo/bar
|
||||
foo/**/bar
|
||||
$ restic -r /tmp/backup backup ~/work --exclude=*.c --exclude-file=exclude
|
||||
```
|
||||
|
||||
Patterns use [`filepath.Glob`](https://golang.org/pkg/path/filepath/#Glob) internally,
|
||||
see [`filepath.Match`](https://golang.org/pkg/path/filepath/#Match) for syntax.
|
||||
Additionally, `**` matches arbitrary levels of subdirectories.
|
||||
Environment-variables in exclude-files are expanded with [`os.ExpandEnv`](https://golang.org/pkg/os/#ExpandEnv).
|
||||
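
A small, hypothetical Go example of the two standard-library primitives mentioned above; note that the `**` extension is handled by restic itself and is not part of `filepath.Match`.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Single patterns use filepath.Match semantics.
	ok, err := filepath.Match("*.go", "chunker.go")
	fmt.Println(ok, err) // true <nil>

	ok, _ = filepath.Match("foo/*/bar", "foo/x/bar")
	fmt.Println(ok) // true; a single "*" does not cross path separators

	// Variables in exclude files are expanded with os.ExpandEnv.
	os.Setenv("HOME", "/home/user")
	fmt.Println(os.ExpandEnv("$HOME/work")) // /home/user/work
}
```
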
|
||||
By specifying the option `--one-file-system` you can instruct restic to only
|
||||
backup files from the file systems the initially specified files or directories
|
||||
reside on. For example, calling restic like this won't backup `/sys` or
|
||||
`/dev` on a Linux system:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup --one-file-system /
|
||||
```
|
||||
By using the `--files-from` option you can read the files you want to backup
|
||||
from a file. This is especially useful if a lot of files have to be backed up
|
||||
that are not in the same folder or are maybe pre-filtered by other software.
|
||||
|
||||
For example, maybe you want to backup files whose path matches a certain pattern:
|
||||
|
||||
```console
|
||||
$ find /tmp/somefiles | grep 'PATTERN' > /tmp/files_to_backup
|
||||
```
|
||||
|
||||
You can then use restic to backup the filtered files:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup
|
||||
```
|
||||
|
||||
Incidentally, you can also combine `--files-from` with the normal file arguments:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup /tmp/some_additional_file
|
||||
```
|
||||
|
||||
## Reading data from stdin
|
||||
|
||||
Sometimes it can be nice to directly save the output of a program, e.g.
|
||||
`mysqldump` so that the SQL can later be restored. Restic supports this mode of
|
||||
operation; just supply the option `--stdin` to the `backup` command like this:
|
||||
|
||||
```console
|
||||
$ mysqldump [...] | restic -r /tmp/backup backup --stdin
|
||||
```
|
||||
|
||||
This creates a new snapshot of the output of `mysqldump`. You can then use e.g.
|
||||
the fuse mounting option (see below) to mount the repository and read the file.
|
||||
|
||||
By default, the file name `stdin` is used, a different name can be specified
|
||||
with `--stdin-filename`, e.g. like this:
|
||||
|
||||
```console
|
||||
$ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql
|
||||
```
|
||||
|
||||
## Tags
|
||||
|
||||
Snapshots can have one or more tags, short strings which add identifying
|
||||
information. Just specify the tags for a snapshot with `--tag`:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup backup --tag projectX ~/shared/work/web
|
||||
[...]
|
||||
```
|
||||
|
||||
The tags can later be used to keep (or forget) snapshots.
|
||||
|
||||
# List all snapshots
|
||||
|
||||
Now, you can list all the snapshots stored in the repository:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
```
|
||||
|
||||
You can filter the listing by directory path:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots --path="/srv"
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
```
|
||||
|
||||
Or filter by host:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots --host luigi
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
```
|
||||
|
||||
Combining filters is also possible.
|
||||
|
||||
# Restore a snapshot
|
||||
|
||||
Restoring a snapshot is as easy as it sounds; just use the following command to
|
||||
restore the contents of the latest snapshot to `/tmp/restore-work`:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup restore 79766175 --target ~/tmp/restore-work
|
||||
enter password for repository:
|
||||
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
|
||||
```
|
||||
|
||||
Use the word `latest` to restore the last backup. You can also combine `latest`
|
||||
with the `--host` and `--path` filters to choose the last backup for a specific
|
||||
host, path or both.
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup restore latest --target ~/tmp/restore-work --path "/home/art" --host luigi
|
||||
enter password for repository:
|
||||
restoring <Snapshot of [/home/art] at 2015-05-08 21:45:17.884408621 +0200 CEST> to /tmp/restore-work
|
||||
```
|
||||
|
||||
# Manage repository keys
|
||||
|
||||
The `key` command allows you to set multiple access keys or passwords per
|
||||
repository. In fact, you can use the `list`, `add`, `remove` and `passwd`
|
||||
sub-commands to manage these keys very precisely:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup key list
|
||||
enter password for repository:
|
||||
ID User Host Created
|
||||
----------------------------------------------------------------------
|
||||
*eb78040b username kasimir 2015-08-12 13:29:57
|
||||
|
||||
$ restic -r /tmp/backup key add
|
||||
enter password for repository:
|
||||
enter password for new key:
|
||||
enter password again:
|
||||
saved new key as <Key of username@kasimir, created on 2015-08-12 13:35:05.316831933 +0200 CEST>
|
||||
|
||||
$ restic -r backup key list
|
||||
enter password for repository:
|
||||
ID User Host Created
|
||||
----------------------------------------------------------------------
|
||||
5c657874 username kasimir 2015-08-12 13:35:05
|
||||
*eb78040b username kasimir 2015-08-12 13:29:57
|
||||
```
|
||||
|
||||
# Manage tags
|
||||
|
||||
Managing tags on snapshots is done with the `tag` command. The existing set of
|
||||
tags can be replaced completely, or tags can be added and removed. The result is
|
||||
directly visible in the `snapshots` command.
|
||||
|
||||
Let's say we want to tag snapshot `590c8fc8` with the tags `NL` and `CH` and
|
||||
remove all other tags that may be present. The following command does that:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup tag --set NL,CH 590c8fc8
|
||||
Create exclusive lock for repository
|
||||
Modified tags on 1 snapshots
|
||||
```
|
||||
|
||||
Note that the snapshot ID has changed, so between each change we need to look up
|
||||
the new ID of the snapshot. But there is an even better way: the `tag` command
|
||||
accepts `--tag` for a filter, so we can filter snapshots based on the tag we
|
||||
just added.
|
||||
|
||||
So we can add and remove tags incrementally like this:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup tag --tag NL --remove CH
|
||||
Create exclusive lock for repository
|
||||
Modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --add UK
|
||||
Create exclusive lock for repository
|
||||
Modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --remove NL
|
||||
Create exclusive lock for repository
|
||||
Modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --add SOMETHING
|
||||
No snapshots were modified
|
||||
```
|
||||
|
||||
# Check integrity and consistency
|
||||
|
||||
Imagine your repository is saved on a server that has a faulty hard drive, or
|
||||
even worse, attackers get privileged access and modify your backup with the
|
||||
intention to make you restore malicious data:
|
||||
|
||||
```console
|
||||
$ sudo sh -c 'echo "boom" >> backup/index/d795ffa99a8ab8f8e42cec1f814df4e48b8f49129360fb57613df93739faee97'
|
||||
```
|
||||
|
||||
In order to detect these things, it is a good idea to regularly use the `check`
|
||||
command to test that everything is all right: that your precious backup data is
|
||||
consistent and its integrity is unharmed:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup check
|
||||
Load indexes
|
||||
ciphertext verification failed
|
||||
```
|
||||
|
||||
Trying to restore a snapshot which has been modified as shown above will yield
|
||||
the same error:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup restore 79766175 --target ~/tmp/restore-work
|
||||
Load indexes
|
||||
ciphertext verification failed
|
||||
```
|
||||
|
||||
# Mount a repository
|
||||
|
||||
Browsing your backup as a regular file system is also very easy. First, create
|
||||
a mount point such as `/mnt/restic` and then use the following command to serve
|
||||
the repository with FUSE:
|
||||
|
||||
```console
|
||||
$ mkdir /mnt/restic
|
||||
$ restic -r /tmp/backup mount /mnt/restic
|
||||
enter password for repository:
|
||||
Now serving /tmp/backup at /mnt/restic
|
||||
Don't forget to umount after quitting!
|
||||
```
|
||||
|
||||
Mounting repositories via FUSE is not possible on Windows and OpenBSD.
|
||||
|
||||
Restic supports storage and preservation of hard links. However, since hard links
|
||||
exist in the scope of a filesystem by definition, restoring hard links from a fuse
|
||||
mount should be done by a program that preserves hard links. A program that does so
|
||||
is `rsync`, used with the option `--hard-links`.
|
||||
|
||||
# Create an SFTP repository
|
||||
|
||||
In order to backup data via SFTP, you must first set up a server with SSH and
|
||||
let it know your public key. Passwordless login is really important since
|
||||
restic fails to connect to the repository if the server prompts for
|
||||
credentials.
|
||||
|
||||
Once the server is configured, the setup of the SFTP repository can simply be
|
||||
achieved by changing the URL scheme in the `init` command:
|
||||
|
||||
```console
|
||||
$ restic -r sftp:user@host:/tmp/backup init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend f1c6108821 at sftp:user@host:/tmp/backup
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
```
|
||||
|
||||
You can also specify a relative (read: no slash (`/`) character at the
|
||||
beginning) directory; in this case the directory is relative to the remote user's
|
||||
home directory.
|
||||
|
||||
# Create a REST server repository
|
||||
|
||||
In order to backup data to a remote server via the HTTP or HTTPS protocol,
|
||||
you must first set up a remote [REST server](https://github.com/restic/rest-server)
|
||||
instance. Once the server is configured, accessing it is achieved by changing the
|
||||
URL scheme like this:
|
||||
|
||||
```console
|
||||
$ restic -r rest:http://host:8000/
|
||||
```
|
||||
|
||||
Depending on your REST server setup, you can use the HTTPS protocol, password
|
||||
protection, or multiple repositories, or any combination of those features, as
|
||||
you see fit. The TCP/IP port is also configurable. Here are some more examples:
|
||||
|
||||
```console
|
||||
$ restic -r rest:https://host:8000/
|
||||
$ restic -r rest:https://user:pass@host:8000/
|
||||
$ restic -r rest:https://user:pass@host:8000/my_backup_repo/
|
||||
```
|
||||
|
||||
If you use TLS, make sure your certificates are signed, because the restic client
|
||||
will refuse to communicate otherwise. It's easy to obtain such certificates
|
||||
today, thanks to free certificate authorities like [Let’s
|
||||
Encrypt](https://letsencrypt.org/).
|
||||
|
||||
The REST server uses exactly the same directory structure as the local backend, so you
|
||||
should be able to access it both locally and via HTTP, even simultaneously.
|
||||
|
||||
# Create an Amazon S3 repository
|
||||
|
||||
Restic can backup data to any Amazon S3 bucket. However, in this case, changing the URL scheme is not enough since Amazon uses special security credentials to sign HTTP requests. As a consequence, you must first set up the following environment variables with the credentials you obtained while creating the bucket.
|
||||
|
||||
```console
|
||||
$ export AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||
$ export AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||
```
|
||||
|
||||
You can then easily initialize a repository that uses your Amazon S3 bucket as a backend; if the bucket does not exist yet, it will be created in the default location:
|
||||
|
||||
```console
|
||||
$ restic -r s3:s3.amazonaws.com/bucket_name init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend eefee03bbd at s3:s3.amazonaws.com/bucket_name
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
```
|
||||
|
||||
It is not possible at the moment to have restic create a new bucket in a different location, so you need to create it using a different program. Afterwards, the S3 server (`s3.amazonaws.com`) will redirect restic to the correct endpoint.
|
||||
|
||||
For an S3-compatible server that is not Amazon (like Minio, see below), or is
|
||||
only available via HTTP, you can specify the URL to the server like this:
|
||||
`s3:http://server:port/bucket_name`.
|
||||
|
||||
## Create a Minio Server repository
|
||||
|
||||
[Minio](https://www.minio.io) is an open-source object storage server, written in Go and compatible with the AWS S3 API.
|
||||
|
||||
### Pre-Requisites
|
||||
|
||||
* Download and Install [Minio Server](https://minio.io/downloads/#minio-server).
|
||||
* You can also refer to [https://docs.minio.io](https://docs.minio.io) for step by step guidance on installation and getting started on Minio Client and Minio Server.
|
||||
|
||||
You must first set up the following environment variables with the credentials of your running Minio Server.
|
||||
|
||||
```console
|
||||
$ export AWS_ACCESS_KEY_ID=<YOUR-MINIO-ACCESS-KEY-ID>
|
||||
$ export AWS_SECRET_ACCESS_KEY=<YOUR-MINIO-SECRET-ACCESS-KEY>
|
||||
```
|
||||
|
||||
Now you can easily initialize restic to use the Minio server as a backend with this command.
|
||||
|
||||
```console
|
||||
$ ./restic -r s3:http://localhost:9000/restic init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend 6ad29560f5 at s3:http://localhost:9000/restic1
|
||||
Please note that knowledge of your password is required to access
|
||||
the repository. Losing your password means that your data is irrecoverably lost.
|
||||
```
|
||||
|
||||
# Removing old snapshots
|
||||
|
||||
All backup space is finite, so restic allows removing old snapshots. This can
|
||||
be done either manually (by specifying a snapshot ID to remove) or by using a
|
||||
policy that describes which snapshots to forget. For all remove operations, two
|
||||
commands need to be called in sequence: `forget` to remove a snapshot and
|
||||
`prune` to actually remove the data that was referenced by the snapshot from
|
||||
the repository. This can be automated with the `--prune` option of the `forget`
|
||||
command, which runs `prune` automatically if snapshots have been removed.
|
||||
|
||||
## Remove a single snapshot
|
||||
|
||||
The command `snapshots` can be used to list all snapshots in a repository like this:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
```
|
||||
|
||||
In order to remove the snapshot of `/home/art`, use the `forget` command and
|
||||
specify the snapshot ID on the command line:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup forget bdbd3439
|
||||
enter password for repository:
|
||||
removed snapshot d3f01f63
|
||||
```
|
||||
|
||||
Afterwards this snapshot is removed:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
```
|
||||
|
||||
But the data that was referenced by files in this snapshot is still stored in
|
||||
the repository. To clean up unreferenced data, the `prune` command must be run:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup prune
|
||||
enter password for repository:
|
||||
|
||||
counting files in repo
|
||||
building new index for repo
|
||||
[0:00] 100.00% 22 / 22 files
|
||||
repository contains 22 packs (8512 blobs) with 100.092 MiB bytes
|
||||
processed 8512 blobs: 0 duplicate blobs, 0B duplicate
|
||||
load all snapshots
|
||||
find data that is still in use for 1 snapshots
|
||||
[0:00] 100.00% 1 / 1 snapshots
|
||||
found 8433 of 8512 data blobs still in use
|
||||
will rewrite 3 packs
|
||||
creating new index
|
||||
[0:00] 86.36% 19 / 22 files
|
||||
saved new index as 544a5084
|
||||
done
|
||||
```
|
||||
|
||||
Afterwards the repository is smaller.
|
||||
|
||||
You can automate this two-step process by using the `--prune` switch to
|
||||
`forget`:
|
||||
|
||||
```console
|
||||
$ restic forget --keep-last 1 --prune
|
||||
snapshots for host mopped, directories /home/user/work:
|
||||
|
||||
keep 1 snapshots:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
4bba301e 2017-02-21 10:49:18 mopped /home/user/work
|
||||
|
||||
remove 1 snapshots:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
8c02b94b 2017-02-21 10:48:33 mopped /home/user/work
|
||||
|
||||
1 snapshots have been removed, running prune
|
||||
counting files in repo
|
||||
building new index for repo
|
||||
[0:00] 100.00% 37 / 37 packs
|
||||
repository contains 37 packs (5521 blobs) with 151.012 MiB bytes
|
||||
processed 5521 blobs: 0 duplicate blobs, 0B duplicate
|
||||
load all snapshots
|
||||
find data that is still in use for 1 snapshots
|
||||
[0:00] 100.00% 1 / 1 snapshots
|
||||
found 5323 of 5521 data blobs still in use, removing 198 blobs
|
||||
will delete 0 packs and rewrite 27 packs, this frees 22.106 MiB
|
||||
creating new index
|
||||
[0:00] 100.00% 30 / 30 packs
|
||||
saved new index as b49f3e68
|
||||
done
|
||||
```
|
||||
|
||||
|
||||
## Removing snapshots according to a policy
|
||||
|
||||
Removing snapshots manually is tedious and error-prone; therefore restic allows
|
||||
specifying which snapshots should be removed automatically according to a
|
||||
policy. You can specify how many hourly, daily, weekly, monthly and yearly
|
||||
snapshots to keep; any other snapshots are removed. The most important
|
||||
command-line parameter here is `--dry-run`, which instructs restic not to remove
|
||||
anything, but only print which snapshots would be removed.
|
||||
|
||||
When `forget` is run with a policy, restic loads the list of all snapshots,
|
||||
then groups these by host name and list of directories. The policy is then
|
||||
applied to each group of snapshots separately. This is a safety feature.
|
||||
|
||||
The `forget` command accepts the following parameters:
|
||||
|
||||
* `--keep-last n` never delete the `n` last (most recent) snapshots
|
||||
* `--keep-hourly n` for the last `n` hours in which a snapshot was made, keep
|
||||
only the last snapshot for each hour.
|
||||
* `--keep-daily n` for the last `n` days which have one or more snapshots, only
|
||||
keep the last one for that day.
|
||||
* `--keep-weekly n` for the last `n` weeks which have one or more snapshots, only
|
||||
keep the last one for that week.
|
||||
* `--keep-monthly n` for the last `n` months which have one or more snapshots, only
|
||||
keep the last one for that month.
|
||||
* `--keep-yearly n` for the last `n` years which have one or more snapshots, only
|
||||
keep the last one for that year.
|
||||
* `--keep-tag` keep all snapshots which have all tags specified by this option
|
||||
(can be specified multiple times).
|
||||
|
||||
Additionally, you can restrict removing snapshots to those which have a
|
||||
particular hostname with the `--hostname` parameter, or tags with the `--tag`
|
||||
option. When multiple tags are specified, only the snapshots which have all the
|
||||
tags are considered.
|
||||
|
||||
All the `--keep-*` options above only count hours/days/weeks/months/years which
|
||||
have a snapshot, so those without a snapshot are ignored.
|
||||
|
||||
Let's explain this with an example: Suppose you have only made a backup on each
|
||||
Sunday for 12 weeks. Then `forget --keep-daily 4` will keep the last four snapshots
|
||||
for the last four Sundays, but remove the rest. Only counting the days which
|
||||
have a backup and ignoring the ones without is a safety feature: it prevents
|
||||
restic from removing many snapshots when no new ones are created. If it was
|
||||
implemented otherwise, running `forget --keep-daily 4` on a Friday would remove
|
||||
all snapshots!
|
||||
|
||||
# Debugging restic
|
||||
|
||||
The program can be built with debug support like this:
|
||||
|
||||
```console
|
||||
$ go run build.go -tags debug
|
||||
```
|
||||
|
||||
Afterwards, extensive debug messages are written to the file named in the environment
|
||||
variable `DEBUG_LOG`, e.g.:
|
||||
|
||||
```console
|
||||
$ DEBUG_LOG=/tmp/restic-debug.log restic backup ~/work
|
||||
```
|
||||
|
||||
If you suspect that there is a bug, you can have a look at the debug log.
|
||||
Please be aware that the debug log might contain sensitive information such as
|
||||
file and directory names.
|
||||
|
||||
The debug log will always contain all log messages restic generates. You can
|
||||
also instruct restic to print some or all debug messages to stderr. These can
|
||||
also be limited to e.g. a list of source files or a list of patterns for
|
||||
function names. The patterns are globbing patterns (see the documentation for
|
||||
[`path.Glob`](https://golang.org/pkg/path/#Glob)), multiple patterns are
|
||||
separated by commas. Patterns are case sensitive.
|
||||
|
||||
Printing all log messages to the console can be achieved by setting the file
|
||||
filter to `*`:
|
||||
|
||||
```console
|
||||
$ DEBUG_FILES=* restic check
|
||||
```
|
||||
|
||||
If you want restic to just print all debug log messages from the files
|
||||
`main.go` and `lock.go`, set the environment variable `DEBUG_FILES` like this:
|
||||
|
||||
```console
|
||||
$ DEBUG_FILES=main.go,lock.go restic check
|
||||
```
|
||||
|
||||
The following command line instructs restic to only print debug statements
|
||||
originating in functions that match the pattern `*unlock*` (case sensitive):
|
||||
|
||||
```console
|
||||
$ DEBUG_FUNCS=*unlock* restic check
|
||||
```
|
||||
|
||||
# Under the hood: Browse repository objects
|
||||
|
||||
Internally, a repository stores data of several different types described in the [design documentation](https://github.com/restic/restic/blob/master/doc/Design.md). You can `list` objects such as blobs, packs, index, snapshots, keys or locks with the following command:
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup list snapshots
|
||||
d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
|
||||
```
|
||||
|
||||
The `find` command searches for a given
|
||||
[pattern](http://golang.org/pkg/path/filepath/#Match) in the repository.
|
||||
|
||||
```console
|
||||
$ restic -r backup find test.txt
|
||||
debug log file restic.log
|
||||
debug enabled
|
||||
enter password for repository:
|
||||
found 1 matching entries in snapshot 196bc5760c909a7681647949e80e5448e276521489558525680acf1bd428af36
|
||||
-rw-r--r-- 501 20 5 2015-08-26 14:09:57 +0200 CEST path/to/test.txt
|
||||
```
|
||||
|
||||
The `cat` command allows you to display the JSON representation of the objects
|
||||
or their raw content.
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-08-12T12:52:44.091448856+02:00",
|
||||
"tree": "05cec17e8d3349f402576d02576a2971fc0d9f9776ce2f441c7010849c4ff5af",
|
||||
"paths": [
|
||||
"/home/user/work"
|
||||
],
|
||||
"hostname": "kasimir",
|
||||
"username": "username",
|
||||
"uid": 501,
|
||||
"gid": 20
|
||||
}
|
||||
```
|
||||
|
||||
# Scripting restic
|
||||
|
||||
Restic supports the output of some commands in JSON format. The JSON flag `--json` is currently supported only by `restic snapshots`.
|
||||
|
||||
```console
|
||||
$ restic -r /tmp/backup snapshots --json
```
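
A hedged sketch of consuming this output from a program: it makes no assumption about the exact JSON schema and simply re-prints whatever `restic snapshots --json` returns. The repository path is a placeholder, and the password is expected to come from a non-interactive source such as the `RESTIC_PASSWORD` environment variable described earlier.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Assumes the repository password is supplied non-interactively,
	// e.g. via RESTIC_PASSWORD or --password-file.
	out, err := exec.Command("restic", "-r", "/tmp/backup", "snapshots", "--json").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "restic failed:", err)
		os.Exit(1)
	}

	// Decode without assuming a fixed schema and re-print it indented.
	var snapshots interface{}
	if err := json.Unmarshal(out, &snapshots); err != nil {
		fmt.Fprintln(os.Stderr, "cannot parse JSON:", err)
		os.Exit(1)
	}
	pretty, _ := json.MarshalIndent(snapshots, "", "  ")
	fmt.Println(string(pretty))
}
```
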
|
38
doc/PKGBUILD
@@ -1,38 +0,0 @@
|
||||
# Maintainer: Florian Daniel <fd@noxa.de>
|
||||
# Contributor: Eldar Tsraev <elts@culab.org>
|
||||
# Contributor: Andreas Guth <andreas.guth@rwth-aachen.de>
|
||||
# Contributor: Alexander Neumann <alexander@bumpern.de>
|
||||
options=(!strip)
|
||||
pkgname=restic-git
|
||||
pkgver=v0.1.0.r172.g1f1b8e1
|
||||
pkgrel=1
|
||||
pkgdesc="restic is a program that does backups right."
|
||||
arch=('i686' 'x86_64')
|
||||
url="https://github.com/restic/restic"
|
||||
license=('BSD')
|
||||
depends=('glibc')
|
||||
makedepends=('git' 'go>=1.3')
|
||||
provides=('restic')
|
||||
conflicts=('restic')
|
||||
source=("${pkgname}::git+https://github.com/restic/restic")
|
||||
md5sums=('SKIP')
|
||||
|
||||
importpath='github.com/restic/restic'
|
||||
|
||||
pkgver() {
|
||||
cd "$pkgname"
|
||||
git describe --long | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
|
||||
}
|
||||
|
||||
build() {
|
||||
cd "$pkgname"
|
||||
go run build.go
|
||||
}
|
||||
|
||||
package() {
|
||||
install -Dm755 "$pkgname/restic" "$pkgdir/usr/bin/restic"
|
||||
install -Dm644 "$pkgname/LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||
install -Dm644 "$pkgname/README.md" "$pkgdir/usr/share/doc/$pkgname/README"
|
||||
}
|
||||
|
||||
# vim:set ts=2 sw=2 et:
|
@@ -1,72 +0,0 @@
|
||||
REST Backend
|
||||
============
|
||||
|
||||
Restic can interact with an HTTP backend that respects the following REST API. The
|
||||
following values are valid for `{type}`: `data`, `keys`, `locks`, `snapshots`,
|
||||
`index`, `config`. `{path}` is a path to the repository, so that multiple
|
||||
different repositories can be accessed. The default path is `/`.
|
||||
|
||||
## POST {path}?create=true
|
||||
|
||||
This request is used to initially create a new repository. The server responds
|
||||
with "200 OK" if the repository structure was created successfully or already
|
||||
exists, otherwise an error is returned.
|
||||
|
||||
## DELETE {path}
|
||||
|
||||
Deletes the repository on the server side. The server responds with "200 OK" if
|
||||
the repository was successfully removed. If this function is not implemented
|
||||
the server returns "501 Not Implemented", if this it is denied by the server it
|
||||
returns "403 Forbidden".
|
||||
|
||||
## HEAD {path}/config
|
||||
|
||||
Returns "200 OK" if the repository has a configuration,
|
||||
an HTTP error otherwise.
|
||||
|
||||
## GET {path}/config
|
||||
|
||||
Returns the content of the configuration file if the repository has a configuration,
|
||||
an HTTP error otherwise.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
## POST {path}/config
|
||||
|
||||
Returns "200 OK" if the configuration of the request body has been saved,
|
||||
an HTTP error otherwise.
|
||||
|
||||
## GET {path}/{type}/
|
||||
|
||||
Returns a JSON array containing the names of all the blobs stored for a given type.
|
||||
|
||||
Response format: JSON
|
||||
|
||||
## HEAD {path}/{type}/{name}
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type is stored in the repository,
|
||||
"404 not found" otherwise. If the blob exists, the HTTP header `Content-Length`
|
||||
is set to the file size.
|
||||
|
||||
## GET {path}/{type}/{name}
|
||||
|
||||
Returns the content of the blob with the given name and type if it is stored in the repository,
|
||||
"404 not found" otherwise.
|
||||
|
||||
If the request specifies a partial read with a Range header field,
|
||||
then the status code of the response is 206 instead of 200
|
||||
and the response only contains the specified range.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
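
An illustrative Go client for this endpoint, including a partial read via a `Range` header as described above. The server address, repository path and blob name are placeholders, not values from a real repository.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder server, repository path ("/") and blob name.
	url := "http://localhost:8000/data/2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1"

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	// Ask for the first KiB only; a conforming server answers with 206.
	req.Header.Set("Range", "bytes=0-1023")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, "-", len(body), "bytes received")
}
```
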
|
||||
## POST {path}/{type}/{name}
|
||||
|
||||
Saves the content of the request body as a blob with the given name and type.
|
||||
The server responds with "200 OK" on success and an HTTP error otherwise.
|
||||
|
||||
Request format: binary/octet-stream
|
||||
|
||||
## DELETE {path}/{type}/{name}
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type has been deleted from the repository,
|
||||
an HTTP error otherwise.
|
10
doc/_static/css/restic.css
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
@import url('theme.css');
|
||||
|
||||
.wy-side-nav-search {
|
||||
background-color: #0000b4;
|
||||
}
|
||||
|
||||
.logo {
|
||||
height: 50% !important;
|
||||
width: 50% !important;
|
||||
}
|
BIN
doc/_static/favicon.ico
vendored
Normal file
After Width: | Height: | Size: 1.4 KiB |
124
doc/code.css
@@ -1,124 +0,0 @@
|
||||
code {
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
/* based on https://github.com/mkdocs/mkdocs/issues/1019 */
|
||||
|
||||
.codehilite code, .codehilite pre {
|
||||
color:#3F3F3F;background-color:#F7F7F7;
|
||||
overflow: auto;
|
||||
box-sizing: border-box;
|
||||
|
||||
padding: 0.01em 4px;
|
||||
padding-top: 0.01em;
|
||||
padding-right-value: 4px;
|
||||
padding-bottom: 0.01em;
|
||||
padding-left-value: 4px;
|
||||
padding-left-ltr-source: physical;
|
||||
padding-left-rtl-source: physical;
|
||||
padding-right-ltr-source: physical;
|
||||
padding-right-rtl-source: physical;
|
||||
|
||||
border-radius: 4px !important;
|
||||
border-top-left-radius: 4px;
|
||||
border-top-right-radius: 4px;
|
||||
border-bottom-right-radius: 4px;
|
||||
border-bottom-left-radius: 4px;
|
||||
|
||||
border: 1px solid #CCC !important;
|
||||
border-top-width: 1px;
|
||||
border-right-width-value: 1px;
|
||||
border-right-width-ltr-source: physical;
|
||||
border-right-width-rtl-source: physical;
|
||||
border-bottom-width: 1px;
|
||||
border-left-width-value: 1px;
|
||||
border-left-width-ltr-source: physical;
|
||||
border-left-width-rtl-source: physical;
|
||||
border-top-style: solid;
|
||||
border-right-style-value: solid;
|
||||
border-right-style-ltr-source: physical;
|
||||
border-right-style-rtl-source: physical;
|
||||
border-bottom-style: solid;
|
||||
border-left-style-value: solid;
|
||||
border-left-style-ltr-source: physical;
|
||||
border-left-style-rtl-source: physical;
|
||||
border-top-color: #CCC;
|
||||
border-right-color-value: #CCC;
|
||||
border-right-color-ltr-source: physical;
|
||||
border-right-color-rtl-source: physical;
|
||||
border-bottom-color: #CCC;
|
||||
border-left-color-value: #CCC;
|
||||
border-left-color-ltr-source: physical;
|
||||
border-left-color-rtl-source: physical;
|
||||
-moz-border-top-colors: none;
|
||||
-moz-border-right-colors: none;
|
||||
-moz-border-bottom-colors: none;
|
||||
-moz-border-left-colors: none;
|
||||
border-image-source: none;
|
||||
border-image-slice: 100% 100% 100% 100%;
|
||||
border-image-width: 1 1 1 1;
|
||||
border-image-outset: 0 0 0 0;
|
||||
border-image-repeat: stretch stretch;
|
||||
}
|
||||
|
||||
.codehilite .hll { background-color: #ffffcc }
|
||||
.codehilite .c { color: #999988; font-style: italic } /* Comment */
|
||||
.codehilite .err { color: #a61717; background-color: #e3d2d2 } /* Error */
|
||||
.codehilite .k { color: #000000; font-weight: bold } /* Keyword */
|
||||
.codehilite .o { color: #000000; font-weight: bold } /* Operator */
|
||||
.codehilite .cm { color: #999988; font-style: italic } /* Comment.Multiline */
|
||||
.codehilite .cp { color: #999999; font-weight: bold; font-style: italic } /* Comment.Preproc */
|
||||
.codehilite .c1 { color: #999988; font-style: italic } /* Comment.Single */
|
||||
.codehilite .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */
|
||||
.codehilite .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
|
||||
.codehilite .ge { color: #000000; font-style: italic } /* Generic.Emph */
|
||||
.codehilite .gr { color: #aa0000 } /* Generic.Error */
|
||||
.codehilite .gh { color: #999999 } /* Generic.Heading */
|
||||
.codehilite .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
|
||||
.codehilite .go { color: #888888 } /* Generic.Output */
|
||||
.codehilite .gp { color: #555555 } /* Generic.Prompt */
|
||||
.codehilite .gs { font-weight: bold } /* Generic.Strong */
|
||||
.codehilite .gu { color: #aaaaaa } /* Generic.Subheading */
|
||||
.codehilite .gt { color: #aa0000 } /* Generic.Traceback */
|
||||
.codehilite .kc { color: #000000; font-weight: bold } /* Keyword.Constant */
|
||||
.codehilite .kd { color: #000000; font-weight: bold } /* Keyword.Declaration */
|
||||
.codehilite .kn { color: #000000; font-weight: bold } /* Keyword.Namespace */
|
||||
.codehilite .kp { color: #000000; font-weight: bold } /* Keyword.Pseudo */
|
||||
.codehilite .kr { color: #000000; font-weight: bold } /* Keyword.Reserved */
|
||||
.codehilite .kt { color: #445588; font-weight: bold } /* Keyword.Type */
|
||||
.codehilite .m { color: #009999 } /* Literal.Number */
|
||||
.codehilite .s { color: #d01040 } /* Literal.String */
|
||||
.codehilite .na { color: #008080 } /* Name.Attribute */
|
||||
.codehilite .nb { color: #0086B3 } /* Name.Builtin */
|
||||
.codehilite .nc { color: #445588; font-weight: bold } /* Name.Class */
|
||||
.codehilite .no { color: #008080 } /* Name.Constant */
|
||||
.codehilite .nd { color: #3c5d5d; font-weight: bold } /* Name.Decorator */
|
||||
.codehilite .ni { color: #800080 } /* Name.Entity */
|
||||
.codehilite .ne { color: #990000; font-weight: bold } /* Name.Exception */
|
||||
.codehilite .nf { color: #990000; font-weight: bold } /* Name.Function */
|
||||
.codehilite .nl { color: #990000; font-weight: bold } /* Name.Label */
|
||||
.codehilite .nn { color: #555555 } /* Name.Namespace */
|
||||
.codehilite .nt { color: #000080 } /* Name.Tag */
|
||||
.codehilite .nv { color: #008080 } /* Name.Variable */
|
||||
.codehilite .ow { color: #000000; font-weight: bold } /* Operator.Word */
|
||||
.codehilite .w { color: #bbbbbb } /* Text.Whitespace */
|
||||
.codehilite .mf { color: #009999 } /* Literal.Number.Float */
|
||||
.codehilite .mh { color: #009999 } /* Literal.Number.Hex */
|
||||
.codehilite .mi { color: #009999 } /* Literal.Number.Integer */
|
||||
.codehilite .mo { color: #009999 } /* Literal.Number.Oct */
|
||||
.codehilite .sb { color: #d01040 } /* Literal.String.Backtick */
|
||||
.codehilite .sc { color: #d01040 } /* Literal.String.Char */
|
||||
.codehilite .sd { color: #d01040 } /* Literal.String.Doc */
|
||||
.codehilite .s2 { color: #d01040 } /* Literal.String.Double */
|
||||
.codehilite .se { color: #d01040 } /* Literal.String.Escape */
|
||||
.codehilite .sh { color: #d01040 } /* Literal.String.Heredoc */
|
||||
.codehilite .si { color: #d01040 } /* Literal.String.Interpol */
|
||||
.codehilite .sx { color: #d01040 } /* Literal.String.Other */
|
||||
.codehilite .sr { color: #009926 } /* Literal.String.Regex */
|
||||
.codehilite .s1 { color: #d01040 } /* Literal.String.Single */
|
||||
.codehilite .ss { color: #990073 } /* Literal.String.Symbol */
|
||||
.codehilite .bp { color: #999999 } /* Name.Builtin.Pseudo */
|
||||
.codehilite .vc { color: #008080 } /* Name.Variable.Class */
|
||||
.codehilite .vg { color: #008080 } /* Name.Variable.Global */
|
||||
.codehilite .vi { color: #008080 } /* Name.Variable.Instance */
|
||||
.codehilite .il { color: #009999 } /* Literal.Number.Integer.Long */
|
106
doc/conf.py
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# restic documentation build configuration file, created by
|
||||
# sphinx-quickstart on Fri Apr 14 22:44:43 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
import os
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = []
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'restic'
|
||||
copyright = '2017, restic authors'
|
||||
author = 'fd0'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
|
||||
# read version from ../VERSION
|
||||
version = open('../VERSION').readlines()[0]
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = version
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
if os.environ.get('READTHEDOCS') == 'True':
|
||||
html_context = {
|
||||
'css_files': [
|
||||
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
|
||||
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
|
||||
'_static/css/restic.css',
|
||||
]
|
||||
}
|
||||
else:
|
||||
# we're not built by rtd => add rtd-theme
|
||||
import sphinx_rtd_theme
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
html_style = 'css/restic.css'
|
||||
|
||||
html_logo = 'logo/logo.png'
|
||||
|
||||
html_favicon = '_static/favicon.ico'
|
||||
|
||||
html_show_version = False
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'resticdoc'
|
610
doc/design.rst
Normal file
@@ -0,0 +1,610 @@
|
||||
Design
|
||||
======
|
||||
|
||||
Terminology
|
||||
-----------
|
||||
|
||||
This section introduces terminology used in this document.
|
||||
|
||||
*Repository*: All data produced during a backup is sent to and stored in
|
||||
a repository in a structured form, for example in a file system
|
||||
hierarchy with several subdirectories. A repository implementation must
|
||||
be able to fulfill a number of operations, e.g. list the contents.
|
||||
|
||||
*Blob*: A Blob combines a number of data bytes with identifying
|
||||
information like the SHA-256 hash of the data and its length.
|
||||
|
||||
*Pack*: A Pack combines one or more Blobs, e.g. in a single file.
|
||||
|
||||
*Snapshot*: A Snapshot stands for the state of a file or directory that
|
||||
has been backed up at some point in time. The state here means the
|
||||
content and meta data like the name and modification time for the file
|
||||
or the directory and its contents.
|
||||
|
||||
*Storage ID*: A storage ID is the SHA-256 hash of the content stored in
|
||||
the repository. This ID is required in order to load the file from the
|
||||
repository.
|
||||
|
||||
Repository Format
|
||||
-----------------
|
||||
|
||||
All data is stored in a restic repository. A repository is able to store
|
||||
data of several different types, which can later be requested based on
|
||||
an ID. This so-called "storage ID" is the SHA-256 hash of the content of
|
||||
a file. All files in a repository are only written once and never
|
||||
modified afterwards. This allows accessing and even writing to the
|
||||
repository with multiple clients in parallel. Only the delete operation
|
||||
removes data from the repository.
|
||||
|
||||
Repositories consist of several directories and a top-level file called
|
||||
``config``. For all other files stored in the repository, the name for
|
||||
the file is the lower case hexadecimal representation of the storage ID,
|
||||
which is the SHA-256 hash of the file's contents. This allows for easy
|
||||
verification of files for accidental modifications, like disk read
|
||||
errors, by simply running the program ``sha256sum`` on the file and
|
||||
comparing its output to the file name. If the prefix of a filename is
|
||||
unique amongst all the other files in the same directory, the prefix may
|
||||
be used instead of the complete filename.
|
||||
|
||||
Apart from the files stored within the ``keys`` directory, all files are
|
||||
encrypted with AES-256 in counter mode (CTR). The integrity of the
|
||||
encrypted data is secured by a Poly1305-AES message authentication code
|
||||
(sometimes also referred to as a "signature").
|
||||
|
||||
In the first 16 bytes of each encrypted file the initialisation vector
|
||||
(IV) is stored. It is followed by the encrypted data and completed by
|
||||
the 16 byte MAC. The format is: ``IV || CIPHERTEXT || MAC``. The
|
||||
complete encryption overhead is 32 bytes. For each file, a new random IV
|
||||
is selected.
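
A small, hedged sketch of this outer layout: the helper below only splits an encrypted repository file into IV, ciphertext and MAC, and performs none of the actual AES-256 or Poly1305-AES operations.

.. code-block:: go

    package main

    import (
        "errors"
        "fmt"
    )

    const (
        ivSize  = 16 // random initialisation vector for AES-256 in CTR mode
        macSize = 16 // Poly1305-AES message authentication code
    )

    // splitEncrypted cuts a buffer holding IV || CIPHERTEXT || MAC into its parts.
    func splitEncrypted(buf []byte) (iv, ciphertext, mac []byte, err error) {
        if len(buf) < ivSize+macSize {
            return nil, nil, nil, errors.New("buffer too small for IV and MAC")
        }
        iv = buf[:ivSize]
        mac = buf[len(buf)-macSize:]
        ciphertext = buf[ivSize : len(buf)-macSize]
        return iv, ciphertext, mac, nil
    }

    func main() {
        // 48 bytes: 16 byte IV, 16 bytes of ciphertext, 16 byte MAC.
        buf := make([]byte, 48)
        iv, ct, mac, err := splitEncrypted(buf)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(iv), len(ct), len(mac)) // prints: 16 16 16
    }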
|
||||
|
||||
The file ``config`` is encrypted this way and contains a JSON document
|
||||
like the following:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"version": 1,
|
||||
"id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
|
||||
"chunker_polynomial": "25b468838dcb75"
|
||||
}
|
||||
|
||||
After decryption, restic first checks that the version field contains a
|
||||
version number that it understands, otherwise it aborts. At the moment,
|
||||
the version is expected to be 1. The field ``id`` holds a unique ID
|
||||
which consists of 32 random bytes, encoded in hexadecimal. This uniquely
|
||||
identifies the repository, regardless if it is accessed via SFTP or
|
||||
locally. The field ``chunker_polynomial`` contains a parameter that is
|
||||
used for splitting large files into smaller chunks (see below).
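
Assuming the file has already been decrypted, the document maps onto a small struct. The following Go sketch mirrors the field names of the JSON shown above and performs the version check; the ``id`` value is shortened for readability.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Config mirrors the decrypted JSON document stored in the file "config".
    type Config struct {
        Version           int    `json:"version"`
        ID                string `json:"id"`
        ChunkerPolynomial string `json:"chunker_polynomial"`
    }

    func main() {
        // Shortened example document; the real "id" is 32 random bytes in hex.
        raw := []byte(`{"version": 1, "id": "5956a3f6", "chunker_polynomial": "25b468838dcb75"}`)

        var cfg Config
        if err := json.Unmarshal(raw, &cfg); err != nil {
            panic(err)
        }
        if cfg.Version != 1 {
            panic("unsupported repository version")
        }
        fmt.Printf("repository %v, chunker polynomial %v\n", cfg.ID, cfg.ChunkerPolynomial)
    }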
|
||||
|
||||
Repository Layout
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ``local`` and ``sftp`` backends are implemented using files and
|
||||
directories stored in a file system. The directory layout is the same
|
||||
for both backend types.
|
||||
|
||||
The basic layout of a repository stored in a ``local`` or ``sftp``
|
||||
backend is shown here:
|
||||
|
||||
::
|
||||
|
||||
/tmp/restic-repo
|
||||
├── config
|
||||
├── data
|
||||
│ ├── 21
|
||||
│ │ └── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1
|
||||
│ ├── 32
|
||||
│ │ └── 32ea976bc30771cebad8285cd99120ac8786f9ffd42141d452458089985043a5
|
||||
│ ├── 59
|
||||
│ │ └── 59fe4bcde59bd6222eba87795e35a90d82cd2f138a27b6835032b7b58173a426
|
||||
│ ├── 73
|
||||
│ │ └── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c
|
||||
│ [...]
|
||||
├── index
|
||||
│ ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d
|
||||
│ └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd
|
||||
├── keys
|
||||
│ └── b02de829beeb3c01a63e6b25cbd421a98fef144f03b9a02e46eff9e2ca3f0bd7
|
||||
├── locks
|
||||
├── snapshots
|
||||
│ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
||||
└── tmp
|
||||
|
||||
A local repository can be initialized with the ``restic init`` command,
|
||||
e.g.:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo init
|
||||
|
||||
The local and sftp backends will auto-detect and accept all layouts described
|
||||
in the following sections, so that remote repositories mounted locally e.g. via
|
||||
fuse can be accessed. The layout auto-detection can be overridden by specifying
|
||||
the option ``-o local.layout=default``, valid values are ``default`` and
|
||||
``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the
|
||||
s3 backend ``s3.layout``.
|
||||
|
||||
S3 Legacy Layout
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Unfortunately, during development the AWS S3 backend used slightly different
paths (the directory names are singular instead of plural for ``key``,
``lock``, and ``snapshot`` files), and the data files are stored directly below
the ``data`` directory. The S3 Legacy repository layout looks like this:
|
||||
|
||||
::
|
||||
|
||||
/config
|
||||
/data
|
||||
├── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1
|
||||
├── 32ea976bc30771cebad8285cd99120ac8786f9ffd42141d452458089985043a5
|
||||
├── 59fe4bcde59bd6222eba87795e35a90d82cd2f138a27b6835032b7b58173a426
|
||||
├── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c
|
||||
[...]
|
||||
/index
|
||||
├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d
|
||||
└── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd
|
||||
/key
|
||||
└── b02de829beeb3c01a63e6b25cbd421a98fef144f03b9a02e46eff9e2ca3f0bd7
|
||||
/lock
|
||||
/snapshot
|
||||
└── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
||||
|
||||
The S3 backend understands and accepts both forms; new backends are
always created with the default layout for compatibility reasons.
|
||||
|
||||
Pack Format
|
||||
-----------
|
||||
|
||||
All files in the repository except Key and Pack files just contain raw
|
||||
data, stored as ``IV || Ciphertext || MAC``. Pack files may contain one
|
||||
or more Blobs of data.
|
||||
|
||||
A Pack's structure is as follows:
|
||||
|
||||
::
|
||||
|
||||
EncryptedBlob1 || ... || EncryptedBlobN || EncryptedHeader || Header_Length
|
||||
|
||||
At the end of the Pack file is a header, which describes the content.
|
||||
The header is encrypted and authenticated. ``Header_Length`` is the
|
||||
length of the encrypted header encoded as a four byte integer in
|
||||
little-endian encoding. Placing the header at the end of a file allows
|
||||
writing the blobs in a continuous stream as soon as they are read during
|
||||
the backup phase. This reduces code complexity and avoids having to
|
||||
re-write a file once the pack is complete and the content and length of
the header are known.
|
||||
|
||||
All the blobs (``EncryptedBlob1``, ``EncryptedBlobN`` etc.) are
|
||||
authenticated and encrypted independently. This enables repository
|
||||
reorganisation without having to touch the encrypted Blobs. In addition
|
||||
it also allows efficient indexing, for only the header needs to be read
|
||||
in order to find out which Blobs are contained in the Pack. Since the
|
||||
header is authenticated, authenticity of the header can be checked
|
||||
without having to read the complete Pack.
|
||||
|
||||
After decryption, a Pack's header consists of the following elements:
|
||||
|
||||
::
|
||||
|
||||
Type_Blob1 || Length(EncryptedBlob1) || Hash(Plaintext_Blob1) ||
|
||||
[...]
|
||||
Type_BlobN || Length(EncryptedBlobN) || Hash(Plaintext_BlobN) ||
|
||||
|
||||
This is enough to calculate the offsets for all the Blobs in the Pack.
|
||||
Length is the length of a Blob as a four byte integer in little-endian
|
||||
format. The type field is a one byte field and labels the content of a
|
||||
blob according to the following table:
|
||||
|
||||
+--------+-----------+
|
||||
| Type | Meaning |
|
||||
+========+===========+
|
||||
| 0 | data |
|
||||
+--------+-----------+
|
||||
| 1 | tree |
|
||||
+--------+-----------+
|
||||
|
||||
All other types are invalid; more types may be added in the future.
|
||||
|
||||
For reconstructing the index or parsing a pack without an index, first
|
||||
the last four bytes must be read in order to find the length of the
|
||||
header. Afterwards, the header can be read and parsed, which yields all
|
||||
plaintext hashes, types, offsets and lengths of all included blobs.
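
The following Go sketch illustrates that parsing step on an already decrypted header, assuming the record layout described above (one type byte, a four byte little-endian length and a 32 byte SHA-256 hash per blob); the blob offsets fall out of summing the lengths.

.. code-block:: go

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    type blobEntry struct {
        Type   byte   // 0 = data, 1 = tree
        Length uint32 // length of the encrypted blob
        Hash   [32]byte
        Offset uint32 // computed: where the blob starts within the pack
    }

    // parseHeader walks a decrypted pack header and computes the blob offsets.
    func parseHeader(hdr []byte) ([]blobEntry, error) {
        const recordSize = 1 + 4 + 32
        if len(hdr)%recordSize != 0 {
            return nil, fmt.Errorf("header length %d is not a multiple of %d", len(hdr), recordSize)
        }

        var entries []blobEntry
        var offset uint32
        for i := 0; i < len(hdr); i += recordSize {
            var e blobEntry
            e.Type = hdr[i]
            e.Length = binary.LittleEndian.Uint32(hdr[i+1 : i+5])
            copy(e.Hash[:], hdr[i+5:i+37])
            e.Offset = offset
            offset += e.Length
            entries = append(entries, e)
        }
        return entries, nil
    }

    func main() {
        // One hypothetical record: a data blob of 25 bytes with an all-zero hash.
        record := make([]byte, 37)
        record[0] = 0 // type: data
        binary.LittleEndian.PutUint32(record[1:5], 25)

        entries, err := parseHeader(record)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%d blob(s), first at offset %d, length %d\n",
            len(entries), entries[0].Offset, entries[0].Length)
    }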
|
||||
|
||||
Indexing
|
||||
--------
|
||||
|
||||
Index files contain information about Data and Tree Blobs and the Packs
|
||||
they are contained in and store this information in the repository. When
|
||||
the local cached index is not accessible any more, the index files can
|
||||
be downloaded and used to reconstruct the index. The files are encrypted
|
||||
and authenticated like Data and Tree Blobs, so the outer structure is
|
||||
``IV || Ciphertext || MAC`` again. The plaintext consists of a JSON
|
||||
document like the following:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"supersedes": [
|
||||
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
|
||||
],
|
||||
"packs": [
|
||||
{
|
||||
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
|
||||
"blobs": [
|
||||
{
|
||||
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
|
||||
"type": "data",
|
||||
"offset": 0,
|
||||
"length": 25
|
||||
},{
|
||||
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
|
||||
"type": "tree",
|
||||
"offset": 38,
|
||||
"length": 100
|
||||
},
|
||||
{
|
||||
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
|
||||
"type": "data",
|
||||
"offset": 150,
|
||||
"length": 123
|
||||
}
|
||||
]
|
||||
}, [...]
|
||||
]
|
||||
}
|
||||
|
||||
This JSON document lists Packs and the blobs contained therein. In this
|
||||
example, the Pack ``73d04e61`` contains two data Blobs and one Tree
Blob; the plaintext hashes are listed afterwards.
|
||||
|
||||
The field ``supersedes`` lists the storage IDs of index files that have
|
||||
been replaced with the current index file. This happens when index files
|
||||
are repacked, for example when old snapshots are removed and Packs are
|
||||
recombined.
|
||||
|
||||
There may be an arbitrary number of index files, containing information
|
||||
on non-disjoint sets of Packs. The number of packs described in a single
|
||||
file is chosen so that the file size is kept below 8 MiB.
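
A hedged sketch of the Go structs such an index document decodes into; the field names follow the JSON example above, and the shortened IDs in the sample data are placeholders.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type indexBlob struct {
        ID     string `json:"id"`
        Type   string `json:"type"` // "data" or "tree"
        Offset uint   `json:"offset"`
        Length uint   `json:"length"`
    }

    type indexPack struct {
        ID    string      `json:"id"`
        Blobs []indexBlob `json:"blobs"`
    }

    type indexFile struct {
        Supersedes []string    `json:"supersedes"`
        Packs      []indexPack `json:"packs"`
    }

    func main() {
        raw := []byte(`{"supersedes": [], "packs": [{"id": "73d04e61",
            "blobs": [{"id": "3ec79977", "type": "data", "offset": 0, "length": 25}]}]}`)

        var idx indexFile
        if err := json.Unmarshal(raw, &idx); err != nil {
            panic(err)
        }
        for _, p := range idx.Packs {
            for _, b := range p.Blobs {
                fmt.Printf("blob %v (%v) in pack %v at offset %d\n", b.ID, b.Type, p.ID, b.Offset)
            }
        }
    }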
|
||||
|
||||
Keys, Encryption and MAC
|
||||
------------------------
|
||||
|
||||
All data stored by restic in the repository is encrypted with AES-256 in
|
||||
counter mode and authenticated using Poly1305-AES. For encrypting new
|
||||
data first 16 bytes are read from a cryptographically secure
|
||||
pseudorandom number generator as a random nonce. This is used both as
|
||||
the IV for counter mode and the nonce for Poly1305. This operation needs
|
||||
three keys: a 32 byte key for AES-256 encryption, a 16 byte AES key and
a 16 byte key for Poly1305. For details see the original paper `The
|
||||
Poly1305-AES message-authentication
|
||||
code <http://cr.yp.to/mac/poly1305-20050329.pdf>`__ by Dan Bernstein.
|
||||
The data is then encrypted with AES-256 and afterwards a message
|
||||
authentication code (MAC) is computed over the ciphertext; everything is
then stored as IV \|\| CIPHERTEXT \|\| MAC.
|
||||
|
||||
The directory ``keys`` contains key files. These are simple JSON
|
||||
documents which contain all data that is needed to derive the
|
||||
repository's master encryption and message authentication keys from a
|
||||
user's password. The JSON document from the repository can be
|
||||
pretty-printed for example by using the Python module ``json``
|
||||
(shortened to increase readability):
|
||||
|
||||
::
|
||||
|
||||
$ python -mjson.tool /tmp/restic-repo/keys/b02de82*
|
||||
{
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0"
|
||||
"kdf": "scrypt",
|
||||
"N": 65536,
|
||||
"r": 8,
|
||||
"p": 1,
|
||||
"created": "2015-01-02T18:10:13.48307196+01:00",
|
||||
"data": "tGwYeKoM0C4j4/9DFrVEmMGAldvEn/+iKC3te/QE/6ox/V4qz58FUOgMa0Bb1cIJ6asrypCx/Ti/pRXCPHLDkIJbNYd2ybC+fLhFIJVLCvkMS+trdywsUkglUbTbi+7+Ldsul5jpAj9vTZ25ajDc+4FKtWEcCWL5ICAOoTAxnPgT+Lh8ByGQBH6KbdWabqamLzTRWxePFoYuxa7yXgmj9A==",
|
||||
"salt": "uW4fEI1+IOzj7ED9mVor+yTSJFd68DGlGOeLgJELYsTU5ikhG/83/+jGd4KKAaQdSrsfzrdOhAMftTSih5Ux6w==",
|
||||
}
|
||||
|
||||
When the repository is opened by restic, the user is prompted for the
|
||||
repository password. This is then used with ``scrypt``, a key derivation
|
||||
function (KDF), and the supplied parameters (``N``, ``r``, ``p`` and
|
||||
``salt``) to derive 64 key bytes. The first 32 bytes are used as the
|
||||
encryption key (for AES-256) and the last 32 bytes are used as the
|
||||
message authentication key (for Poly1305-AES). These last 32 bytes are
|
||||
divided into a 16 byte AES key ``k`` followed by 16 bytes of secret key
|
||||
``r``. The key ``r`` is then masked for use with Poly1305 (see the paper
|
||||
for details).
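
A minimal sketch of this derivation, assuming the ``golang.org/x/crypto/scrypt`` package; the password and salt below are placeholders, and the Poly1305 masking of ``r`` is omitted.

.. code-block:: go

    package main

    import (
        "fmt"

        "golang.org/x/crypto/scrypt"
    )

    func main() {
        password := []byte("secret repository password") // placeholder
        salt := []byte("placeholder salt, 64 random bytes in a real key file")

        // Derive 64 key bytes with the parameters stored in the key file (N, r, p).
        keys, err := scrypt.Key(password, salt, 65536, 8, 1, 64)
        if err != nil {
            panic(err)
        }

        encryptionKey := keys[:32] // AES-256 encryption key
        macKey := keys[32:]        // 16 byte AES key k || 16 byte r (r still needs masking)

        fmt.Printf("encryption key: %d bytes, MAC key: %d bytes\n",
            len(encryptionKey), len(macKey))
    }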
|
||||
|
||||
Those message authentication keys (``k`` and ``r``) are used to compute
|
||||
a MAC over the bytes contained in the JSON field ``data`` (after
|
||||
removing the Base64 encoding and not including the last 32 bytes). If the
|
||||
password is incorrect or the key file has been tampered with, the
|
||||
computed MAC will not match the last 16 bytes of the data, and restic
|
||||
exits with an error. Otherwise, the data is decrypted with the
|
||||
encryption key derived from ``scrypt``. This yields a JSON document
|
||||
which contains the master encryption and message authentication keys for
|
||||
this repository (encoded in Base64). The command
|
||||
``restic cat masterkey`` can be used as follows to decrypt and
|
||||
pretty-print the master key:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat masterkey
|
||||
{
|
||||
"mac": {
|
||||
"k": "evFWd9wWlndL9jc501268g==",
|
||||
"r": "E9eEDnSJZgqwTOkDtOp+Dw=="
|
||||
},
|
||||
"encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
|
||||
}
|
||||
|
||||
All data in the repository is encrypted and authenticated with these
|
||||
master keys. For encryption, the AES-256 algorithm in Counter mode is
|
||||
used. For message authentication, Poly1305-AES is used as described
|
||||
above.
|
||||
|
||||
A repository can have several different passwords, with a key file for
|
||||
each. This way, the password can be changed without having to re-encrypt
|
||||
all data.
|
||||
|
||||
Snapshots
|
||||
---------
|
||||
|
||||
A snapshot represents a directory with all files and sub-directories at
|
||||
a given point in time. For each backup that is made, a new snapshot is
|
||||
created. A snapshot is a JSON document that is stored in an encrypted
|
||||
file below the directory ``snapshots`` in the repository. The filename
|
||||
is the storage ID. This string is unique and used within restic to
|
||||
uniquely identify a snapshot.
|
||||
|
||||
The command ``restic cat snapshot`` can be used as follows to decrypt
|
||||
and pretty-print the contents of a snapshot file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat snapshot 251c2e58
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-01-02T18:10:50.895208559+01:00",
|
||||
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
|
||||
"dir": "/tmp/testdata",
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"tags": [
|
||||
"NL"
|
||||
]
|
||||
}
|
||||
|
||||
Here it can be seen that this snapshot represents the contents of the
|
||||
directory ``/tmp/testdata``. The most important field is ``tree``. When
|
||||
the meta data (e.g. the tags) of a snapshot change, the snapshot needs
|
||||
to be re-encrypted and saved. This will change the storage ID, so in
|
||||
order to relate these seemingly different snapshots, a field
|
||||
``original`` is introduced which contains the ID of the original
|
||||
snapshot, e.g. after adding the tag ``DE`` to the snapshot above it
|
||||
becomes:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat snapshot 22a5af1b
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-01-02T18:10:50.895208559+01:00",
|
||||
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
|
||||
"dir": "/tmp/testdata",
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"tags": [
|
||||
"NL",
|
||||
"DE"
|
||||
],
|
||||
"original": "251c2e5841355f743f9d4ffd3260bee765acee40a6229857e32b60446991b837"
|
||||
}
|
||||
|
||||
Once introduced, the ``original`` field is not modified when the
|
||||
snapshot's meta data is changed again.
|
||||
|
||||
All content within a restic repository is referenced according to its
|
||||
SHA-256 hash. Before saving, each file is split into variable sized
|
||||
Blobs of data. The SHA-256 hashes of all Blobs are saved in an ordered
|
||||
list which then represents the content of the file.
|
||||
|
||||
In order to relate these plaintext hashes to the actual location within
|
||||
a Pack file, an index is used. If the index is not available, the
|
||||
header of all data Blobs can be read.
|
||||
|
||||
Trees and Data
|
||||
--------------
|
||||
|
||||
A snapshot references a tree by the SHA-256 hash of the JSON string
|
||||
representation of its contents. Trees and data are saved in pack files
|
||||
in a subdirectory of the directory ``data``.
|
||||
|
||||
The command ``restic cat blob`` can be used to inspect the tree
|
||||
referenced above (piping the output of the command to ``jq .`` so that
|
||||
the JSON is indented):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat blob 2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf | jq .
|
||||
enter password for repository:
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"name": "testdata",
|
||||
"type": "dir",
|
||||
"mode": 493,
|
||||
"mtime": "2014-12-22T14:47:59.912418701+01:00",
|
||||
"atime": "2014-12-06T17:49:21.748468803+01:00",
|
||||
"ctime": "2014-12-22T14:47:59.912418701+01:00",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"user": "fd0",
|
||||
"inode": 409704562,
|
||||
"content": null,
|
||||
"subtree": "b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
A tree contains a list of entries (in the field ``nodes``) which contain
|
||||
meta data like a name and timestamps. When the entry references a
|
||||
directory, the field ``subtree`` contains the plain text ID of another
|
||||
tree object.
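
The tree objects decode into structs along the following lines; this is a sketch that only includes a subset of the fields visible in the examples.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type node struct {
        Name    string   `json:"name"`
        Type    string   `json:"type"`              // "dir" or "file"
        Subtree string   `json:"subtree,omitempty"` // plaintext ID of another tree (directories only)
        Content []string `json:"content,omitempty"` // plaintext IDs of data blobs (files only)
        Size    uint64   `json:"size,omitempty"`
    }

    type tree struct {
        Nodes []node `json:"nodes"`
    }

    func main() {
        // Shortened subtree ID used as a placeholder.
        raw := []byte(`{"nodes": [{"name": "testdata", "type": "dir", "subtree": "b26e315b"}]}`)

        var t tree
        if err := json.Unmarshal(raw, &t); err != nil {
            panic(err)
        }
        for _, n := range t.Nodes {
            fmt.Printf("%s (%s) -> subtree %s\n", n.Name, n.Type, n.Subtree)
        }
    }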
|
||||
|
||||
When the command ``restic cat blob`` is used, the plaintext ID is needed
|
||||
to print a tree. The tree referenced above can be dumped as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat blob b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc
|
||||
enter password for repository:
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"name": "testfile",
|
||||
"type": "file",
|
||||
"mode": 420,
|
||||
"mtime": "2014-12-06T17:50:23.34513538+01:00",
|
||||
"atime": "2014-12-06T17:50:23.338468713+01:00",
|
||||
"ctime": "2014-12-06T17:50:23.34513538+01:00",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"user": "fd0",
|
||||
"inode": 416863351,
|
||||
"size": 1234,
|
||||
"links": 1,
|
||||
"content": [
|
||||
"50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d"
|
||||
]
|
||||
},
|
||||
[...]
|
||||
]
|
||||
}
|
||||
|
||||
This tree contains a file entry. This time, the ``subtree`` field is not
|
||||
present and the ``content`` field contains a list with one plain text
|
||||
SHA-256 hash.
|
||||
|
||||
The command ``restic cat blob`` can also be used to extract and decrypt
|
||||
data given a plaintext ID, e.g. for the data mentioned above:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
|
||||
enter password for repository:
|
||||
50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
|
||||
|
||||
As can be seen from the output of the program ``sha256sum``, the hash
|
||||
matches the plaintext hash from the map included in the tree above, so
|
||||
the correct data has been returned.
|
||||
|
||||
Locks
|
||||
-----
|
||||
|
||||
The restic repository structure is designed in a way that allows
|
||||
parallel access of multiple instances of restic and even parallel writes.
However, there are some functions that work more efficiently with or even
require exclusive access to the repository. In order to implement these
|
||||
functions, restic processes are required to create a lock on the
|
||||
repository before doing anything.
|
||||
|
||||
Locks come in two types: Exclusive and non-exclusive locks. At most one
|
||||
process can have an exclusive lock on the repository, and during that
|
||||
time there must not be any other locks (exclusive and non-exclusive).
|
||||
There may be multiple non-exclusive locks in parallel.
|
||||
|
||||
A lock is a file in the subdir ``locks`` whose filename is the storage
|
||||
ID of the contents. It is encrypted and authenticated the same way as
|
||||
other files in the repository and contains the following JSON structure:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"time": "2015-06-27T12:18:51.759239612+02:00",
|
||||
"exclusive": false,
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"pid": 13607,
|
||||
"uid": 1000,
|
||||
"gid": 100
|
||||
}
|
||||
|
||||
The field ``exclusive`` defines the type of lock. When a new lock is to
|
||||
be created, restic checks all locks in the repository. When a lock is
|
||||
found, it is checked whether the lock is stale, which is the case for locks
with timestamps older than 30 minutes. If the lock was created on the
same machine, even younger locks are checked to see whether the process is
|
||||
still alive by sending a signal to it. If that fails, restic assumes
|
||||
that the process is dead and considers the lock to be stale.
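
A sketch of that staleness decision (Unix only, since the liveness test sends signal 0 to the process; the 30 minute threshold is taken from the description above):

.. code-block:: go

    package main

    import (
        "fmt"
        "os"
        "syscall"
        "time"
    )

    type lock struct {
        Time     time.Time
        Hostname string
        PID      int
    }

    // isStale reports whether a lock should be ignored: either it is older than
    // 30 minutes, or it was created on this host and the process no longer exists.
    func isStale(l lock, localHostname string) bool {
        if time.Since(l.Time) > 30*time.Minute {
            return true
        }
        if l.Hostname == localHostname {
            // Signal 0 performs error checking only; it fails if the process is gone.
            if err := syscall.Kill(l.PID, syscall.Signal(0)); err != nil {
                return true
            }
        }
        return false
    }

    func main() {
        host, _ := os.Hostname()
        l := lock{Time: time.Now().Add(-45 * time.Minute), Hostname: host, PID: os.Getpid()}
        fmt.Println("stale:", isStale(l, host)) // true: older than 30 minutes
    }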
|
||||
|
||||
When a new lock is to be created and no other conflicting locks are
|
||||
detected, restic creates a new lock, waits, and checks if other locks
|
||||
appeared in the repository. Depending on the type of the other locks and
|
||||
the lock to be created, restic either continues or fails.
|
||||
|
||||
Backups and Deduplication
|
||||
-------------------------
|
||||
|
||||
For creating a backup, restic scans the source directory for all files,
|
||||
sub-directories and other entries. The data from each file is split into
|
||||
variable length Blobs cut at offsets defined by a sliding window of 64
bytes. The implementation uses Rabin Fingerprints to implement this
|
||||
Content Defined Chunking (CDC). An irreducible polynomial is selected at
|
||||
random and saved in the file ``config`` when a repository is
|
||||
initialized, so that watermark attacks are much harder.
|
||||
|
||||
Files smaller than 512 KiB are not split; Blobs are between 512 KiB and 8 MiB
in size. The implementation aims for 1 MiB Blob size on average.
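
A short sketch of the chunking step, assuming the ``github.com/restic/chunker`` package and its ``RandomPolynomial``/``New``/``Next`` API; in restic itself the polynomial comes from the repository's ``config`` file rather than being generated on the fly.

.. code-block:: go

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"
        "io"

        "github.com/restic/chunker"
    )

    func main() {
        // A random irreducible polynomial; restic stores it in the config file.
        pol, err := chunker.RandomPolynomial()
        if err != nil {
            panic(err)
        }

        // 20 MiB of random data as a stand-in for a file to be backed up.
        data := make([]byte, 20*1024*1024)
        if _, err := rand.Read(data); err != nil {
            panic(err)
        }

        chnker := chunker.New(bytes.NewReader(data), pol)
        buf := make([]byte, 8*1024*1024) // large enough for the maximum chunk size

        for {
            c, err := chnker.Next(buf)
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Printf("chunk at offset %d, length %d\n", c.Start, c.Length)
        }
    }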
|
||||
|
||||
For modified files, only modified Blobs have to be saved in a subsequent
|
||||
backup. This even works if bytes are inserted or removed at arbitrary
|
||||
positions within the file.
|
||||
|
||||
Threat Model
|
||||
------------
|
||||
|
||||
The design goals for restic include being able to securely store backups
|
||||
in a location that is not completely trusted, e.g. a shared system where
|
||||
others can potentially access the files or (in the case of the system
|
||||
administrator) even modify or delete them.
|
||||
|
||||
General assumptions:
|
||||
|
||||
- The host system a backup is created on is trusted. This is the most
|
||||
basic requirement, and essential for creating trustworthy backups.
|
||||
|
||||
The restic backup program guarantees the following:
|
||||
|
||||
- Accessing the unencrypted content of stored files and metadata should
|
||||
not be possible without a password for the repository. Everything
|
||||
except the metadata included for informational purposes in the key
|
||||
files is encrypted and authenticated.
|
||||
|
||||
- Modifications (intentional or unintentional) can be detected
|
||||
automatically on several layers:
|
||||
|
||||
1. For all accesses of data stored in the repository it is checked
|
||||
whether the cryptographic hash of the contents matches the storage
|
||||
ID (the file's name). This way, modifications (bad RAM, broken
hard disk) can be detected easily.
|
||||
|
||||
2. Before decrypting any data, the MAC on the encrypted data is
|
||||
checked. If there has been a modification, the MAC check will
|
||||
fail. This step happens even before the data is decrypted, so data
|
||||
that has been tampered with is not decrypted at all.
|
||||
|
||||
However, the restic backup program is not designed to protect against
|
||||
attackers deleting files at the storage location. There is nothing that
|
||||
can be done about this. If this needs to be guaranteed, get a secure
|
||||
location without any access from third parties. If you assume that
|
||||
attackers have write access to your files at the storage location,
|
||||
attackers are able to figure out (e.g. based on the timestamps of the
|
||||
stored files) which files belong to what snapshot. When only these files
|
||||
are deleted, the particular snapshot vanishes and all snapshots
|
||||
depending on data that has been added in the snapshot cannot be restored
|
||||
completely. Restic is not designed to detect this attack.
|
69
doc/development.rst
Normal file
@@ -0,0 +1,69 @@
|
||||
Development
|
||||
===========
|
||||
|
||||
Contribute
|
||||
----------
|
||||
Contributions are welcome! Please **open an issue first** (or add a
|
||||
comment to an existing issue) if you plan to work on any code or add a
|
||||
new feature. This way, duplicate work is prevented and we can discuss
|
||||
your ideas and design first.
|
||||
|
||||
More information and a description of the development environment can be
|
||||
found in `CONTRIBUTING.md <https://github.com/restic/restic/blob/master/CONTRIBUTING.md>`__.
|
||||
A document describing the design of restic and the data structures stored on the
|
||||
back end is contained in `Design <https://restic.readthedocs.io/en/latest/design.html>`__.
|
||||
|
||||
If you'd like to start contributing to restic, but don't know exactly
|
||||
what to do, have a look at this great article by Dave Cheney:
|
||||
`Suggestions for contributing to an Open Source
|
||||
project <http://dave.cheney.net/2016/03/12/suggestions-for-contributing-to-an-open-source-project>`__.
A few issues have been tagged with the label ``help wanted``; you can
|
||||
start looking at those:
|
||||
https://github.com/restic/restic/labels/help%20wanted
|
||||
|
||||
Security
|
||||
--------
|
||||
**Important**: If you discover something that you believe to be a
|
||||
possible critical security problem, please do *not* open a GitHub issue
|
||||
but send an email directly to alexander@bumpern.de. If possible, please
|
||||
encrypt your email using the following PGP key
|
||||
(`0x91A6868BD3F7A907 <https://pgp.mit.edu/pks/lookup?op=get&search=0xCF8F18F2844575973F79D4E191A6868BD3F7A907>`__):
|
||||
|
||||
::
|
||||
|
||||
pub 4096R/91A6868BD3F7A907 2014-11-01
|
||||
Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
|
||||
uid Alexander Neumann <alexander@bumpern.de>
|
||||
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
|
||||
|
||||
Compatibility
|
||||
-------------
|
||||
|
||||
Backward compatibility for backups is important so that our users are
|
||||
always able to restore saved data. Therefore restic follows `Semantic
|
||||
Versioning <http://semver.org>`__ to clearly define which versions are
|
||||
compatible. The repository and data structures contained therein are
|
||||
considered the "Public API" in the sense of Semantic Versioning. This
|
||||
goes for all released versions of restic; this may not be the case for
the master branch.
|
||||
|
||||
We guarantee backward compatibility of all repositories within one major
|
||||
version; as long as we do not increment the major version, data can be
|
||||
read and restored. We strive to be fully backward compatible to all
|
||||
prior versions.
|
||||
|
||||
Building documentation
|
||||
----------------------
|
||||
|
||||
The restic documentation is built with `Sphinx <http://sphinx-doc.org>`__,
|
||||
therefore building it locally requires a recent Python version and requirements listed in ``doc/requirements.txt``.
|
||||
This example will guide you through the process using `virtualenv <https://virtualenv.pypa.io>`__:
|
||||
|
||||
::
|
||||
|
||||
$ virtualenv venv # create virtual python environment
|
||||
$ source venv/bin/activate # activate the virtual environment
|
||||
$ cd doc
|
||||
$ pip install -r requirements.txt # install dependencies
|
||||
$ make html # build html documentation
|
||||
$ # open _build/html/index.html with your favorite browser
|
@@ -3,10 +3,15 @@ FAQ
|
||||
|
||||
This is the list of Frequently Asked Questions for restic.
|
||||
|
||||
`restic check` reports packs that aren't referenced in any index, is my repository broken?
|
||||
------------------------------------------------------------------------------------------
|
||||
``restic check`` reports packs that aren't referenced in any index, is my repository broken?
|
||||
--------------------------------------------------------------------------------------------
|
||||
|
||||
When `restic check` reports that there are pack files in the repository that are not referenced in any index, that's (in contrast to what restic reports at the moment) not a source for concern. The output looks like this:
|
||||
When ``restic check`` reports that there are pack files in the
|
||||
repository that are not referenced in any index, that's (in contrast to
|
||||
what restic reports at the moment) not a source for concern. The output
|
||||
looks like this:
|
||||
|
||||
::
|
||||
|
||||
$ restic check
|
||||
Create exclusive lock for repository
|
||||
@@ -17,4 +22,7 @@ When `restic check` reports that there are pack files in the repository that are
|
||||
Check snapshots, trees and blobs
|
||||
Fatal: repository contains errors
|
||||
|
||||
The message means that there is more data stored in the repo than strictly necessary. With high probability this is duplicate data. In order to clean it up, the command `restic prune` can be used. The cause of this bug is not yet known.
|
||||
The message means that there is more data stored in the repo than
|
||||
strictly necessary. With high probability this is duplicate data. In
|
||||
order to clean it up, the command ``restic prune`` can be used. The
|
||||
cause of this bug is not yet known.
|
BIN
doc/images/aws_s3/01_aws_start.png
Normal file
After Width: | Height: | Size: 189 KiB |
BIN
doc/images/aws_s3/02_aws_menu.png
Normal file
After Width: | Height: | Size: 199 KiB |
BIN
doc/images/aws_s3/03_buckets_list_before.png
Normal file
After Width: | Height: | Size: 94 KiB |
BIN
doc/images/aws_s3/04_bucket_create_start.png
Normal file
After Width: | Height: | Size: 112 KiB |
BIN
doc/images/aws_s3/05_bucket_create_review.png
Normal file
After Width: | Height: | Size: 116 KiB |
BIN
doc/images/aws_s3/06_buckets_list_after.png
Normal file
After Width: | Height: | Size: 96 KiB |
BIN
doc/images/aws_s3/07_iam_start.png
Normal file
After Width: | Height: | Size: 181 KiB |
BIN
doc/images/aws_s3/08_user_list.png
Normal file
After Width: | Height: | Size: 97 KiB |
BIN
doc/images/aws_s3/09_user_name.png
Normal file
After Width: | Height: | Size: 121 KiB |
BIN
doc/images/aws_s3/10_user_pre_policy.png
Normal file
After Width: | Height: | Size: 176 KiB |
BIN
doc/images/aws_s3/11_policy_start.png
Normal file
After Width: | Height: | Size: 99 KiB |
BIN
doc/images/aws_s3/12_policy_permissions_done.png
Normal file
After Width: | Height: | Size: 118 KiB |
BIN
doc/images/aws_s3/13_policy_review.png
Normal file
After Width: | Height: | Size: 136 KiB |
BIN
doc/images/aws_s3/14_user_attach_policy.png
Normal file
After Width: | Height: | Size: 125 KiB |
BIN
doc/images/aws_s3/15_user_review.png
Normal file
After Width: | Height: | Size: 104 KiB |
BIN
doc/images/aws_s3/16_user_created.png
Normal file
After Width: | Height: | Size: 115 KiB |
119
doc/index.md
@@ -1,119 +0,0 @@
|
||||
Welcome to restic
|
||||
=================
|
||||
|
||||

|
||||
|
||||
restic is a backup program that is fast, efficient and secure. On the left you
|
||||
can find an overview of the documentation. The project's homepage is
|
||||
<https://restic.github.io>, the source code repository can be found on GitHub
|
||||
at the URL <https://github.com/restic/restic>.
|
||||
|
||||
Building and viewing the documentation
|
||||
--------------------------------------
|
||||
|
||||
The documentation you're currently viewing may not match the version of restic
|
||||
you have installed. If you cloned the repository manually, you can find the
|
||||
right documentation in the directory `doc/`. If you're viewing this online at
|
||||
<https://restic.readthedocs.io>, there is a small menu at the bottom left of
|
||||
this page, where you can select the version.
|
||||
|
||||
The restic documentation is built with [MkDocs](http://www.mkdocs.org). After
|
||||
installing it, you can edit and view the documentation locally by running:
|
||||
|
||||
```console
|
||||
$ mkdocs serve
|
||||
INFO - Building documentation...
|
||||
INFO - Cleaning site directory
|
||||
[I 160221 12:33:57 server:271] Serving on http://127.0.0.1:8000
|
||||
```
|
||||
|
||||
Afterwards visit the URL with a browser.
|
||||
|
||||
Design Principles
|
||||
-----------------
|
||||
|
||||
Restic is a program that does backups right and was designed with the following
|
||||
principles in mind:
|
||||
|
||||
* **Easy:** Doing backups should be a frictionless process, otherwise you might be
|
||||
tempted to skip it. Restic should be easy to configure and use, so that, in
|
||||
the event of a data loss, you can just restore it. Likewise,
|
||||
restoring data should not be complicated.
|
||||
|
||||
* **Fast**: Backing up your data with restic should only be limited by your
|
||||
network or hard disk bandwidth so that you can backup your files every day.
|
||||
Nobody does backups if it takes too much time. Restoring backups should only
|
||||
transfer data that is needed for the files that are to be restored, so that
|
||||
this process is also fast.
|
||||
|
||||
* **Verifiable**: Much more important than backup is restore, so restic enables
|
||||
you to easily verify that all data can be restored.
|
||||
|
||||
* **Secure**: Restic uses cryptography to guarantee confidentiality and integrity
|
||||
of your data. The location the backup data is stored is assumed not to be a
|
||||
trusted environment (e.g. a shared space where others like system
|
||||
administrators are able to access your backups). Restic is built to secure
|
||||
your data against such attackers.
|
||||
|
||||
* **Efficient**: With the growth of data, additional snapshots should only take
|
||||
the storage of the actual increment. Even more, duplicate data should be
|
||||
de-duplicated before it is actually written to the storage back end to save
|
||||
precious backup space.
|
||||
|
||||
Compatibility
|
||||
-------------
|
||||
|
||||
Backward compatibility for backups is important so that our users are always
|
||||
able to restore saved data. Therefore restic follows [Semantic
|
||||
Versioning](http://semver.org) to clearly define which versions are compatible.
|
||||
The repository and data structures contained therein are considered the "Public
|
||||
API" in the sense of Semantic Versioning. This goes for all released versions
|
||||
of restic, this may not be the case for the master branch.
|
||||
|
||||
We guarantee backward compatibility of all repositories within one major version;
|
||||
as long as we do not increment the major version, data can be read and restored.
|
||||
We strive to be fully backward compatible to all prior versions.
|
||||
|
||||
Contribute and Documentation
|
||||
----------------------------
|
||||
|
||||
Contributions are welcome! More information can be found in the document
|
||||
[`CONTRIBUTING.md`](https://github.com/restic/restic/blob/master/CONTRIBUTING.md).
|
||||
|
||||
Contact
|
||||
-------
|
||||
|
||||
If you discover a bug, find something surprising or if you would like to
|
||||
discuss or ask something, please [open a github
|
||||
issue](https://github.com/restic/restic/issues/new). If you would like to chat
|
||||
about restic, there is also the IRC channel #restic on irc.freenode.net.
|
||||
|
||||
**Important**: If you discover something that you believe to be a possible
|
||||
critical security problem, please do *not* open a GitHub issue but send an
|
||||
email directly to alexander@bumpern.de. If possible, please encrypt your email
|
||||
using the following PGP key
|
||||
([0x91A6868BD3F7A907](https://pgp.mit.edu/pks/lookup?op=get&search=0xCF8F18F2844575973F79D4E191A6868BD3F7A907)):
|
||||
|
||||
```
|
||||
pub 4096R/91A6868BD3F7A907 2014-11-01
|
||||
Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
|
||||
uid Alexander Neumann <alexander@bumpern.de>
|
||||
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
|
||||
```
|
||||
|
||||
Talks
|
||||
-----
|
||||
|
||||
The following talks will be or have been given about restic:
|
||||
|
||||
* 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016, Brussels, Belgium
|
||||
* 2016-01-29: [restic - Backups mal richtig](https://media.ccc.de/v/c4.openchaos.2016.01.restic): Public lecture in German at [CCC Cologne e.V.](https://koeln.ccc.de) in Cologne, Germany
|
||||
* 2015-08-23: [A Solution to the Backup Inconvenience](https://programm.froscon.de/2015/events/1515.html): Lecture at [FROSCON 2015](https://www.froscon.de) in Bonn, Germany
|
||||
* 2015-02-01: [Lightning Talk at FOSDEM 2015](https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s): A short introduction (with slightly outdated command line)
|
||||
* 2015-01-27: [Talk about restic at CCC Aachen](https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content) (in German)
|
||||
|
||||
License
|
||||
=======
|
||||
|
||||
Restic is licensed under "BSD 2-Clause License". You can find the complete text
|
||||
in the file `LICENSE`.
|
15
doc/index.rst
Normal file
@@ -0,0 +1,15 @@
|
||||
Restic Documentation
|
||||
====================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
installation
|
||||
manual
|
||||
faq
|
||||
tutorials
|
||||
development
|
||||
references
|
||||
talks
|
||||
|
||||
.. include:: ../README.rst
|
68
doc/installation.rst
Normal file
@@ -0,0 +1,68 @@
|
||||
Installation
|
||||
============
|
||||
|
||||
Packages
|
||||
--------
|
||||
|
||||
Mac OS X
|
||||
~~~~~~~~~
|
||||
|
||||
If you are using Mac OS X, you can install restic using the
|
||||
`homebrew <http://brew.sh/>`__ package manager:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ brew tap restic/restic
|
||||
$ brew install restic
|
||||
|
||||
archlinux
|
||||
~~~~~~~~~
|
||||
|
||||
On archlinux, there is a package called ``restic-git`` which can be
|
||||
installed from AUR, e.g. with ``pacaur``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pacaur -S restic-git
|
||||
|
||||
Pre-compiled Binary
|
||||
-------------------
|
||||
|
||||
You can download the latest pre-compiled binary from the `restic release
|
||||
page <https://github.com/restic/restic/releases/latest>`__.
|
||||
|
||||
From Source
|
||||
-----------
|
||||
|
||||
restic is written in the Go programming language and you need at least
|
||||
Go version 1.7. Building restic may also work with older versions of Go,
|
||||
but that's not supported. See the `Getting
|
||||
started <https://golang.org/doc/install>`__ guide of the Go project for
|
||||
instructions on how to install Go.
|
||||
|
||||
In order to build restic from source, execute the following steps:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git clone https://github.com/restic/restic
|
||||
[...]
|
||||
|
||||
$ cd restic
|
||||
|
||||
$ go run build.go
|
||||
|
||||
You can easily cross-compile restic for all supported platforms; just
supply the target OS and platform via the command-line options like this
|
||||
(for Windows and FreeBSD respectively):
|
||||
|
||||
::
|
||||
|
||||
$ go run build.go --goos windows --goarch amd64
|
||||
|
||||
$ go run build.go --goos freebsd --goarch 386
|
||||
|
||||
The resulting binary is statically linked and does not require any
|
||||
libraries.
|
||||
|
||||
At the moment, the only tested compiler for restic is the official Go
|
||||
compiler. Building restic with gccgo may work, but is not supported.
|
1081
doc/manual.rst
Normal file
9
doc/references.rst
Normal file
@@ -0,0 +1,9 @@
|
||||
==========
|
||||
References
|
||||
==========
|
||||
|
||||
.. include:: design.rst
|
||||
|
||||
------------------------
|
||||
|
||||
.. include:: rest_backend.rst
|
2
doc/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
sphinx
|
||||
sphinx_rtd_theme
|
84
doc/rest_backend.rst
Normal file
@@ -0,0 +1,84 @@
|
||||
REST Backend
|
||||
============
|
||||
|
||||
Restic can interact with an HTTP backend that respects the following REST
|
||||
API. The following values are valid for ``{type}``: ``data``, ``keys``,
|
||||
``locks``, ``snapshots``, ``index``, ``config``. ``{path}`` is a path to
|
||||
the repository, so that multiple different repositories can be accessed.
|
||||
The default path is ``/``.
|
||||
|
||||
POST {path}?create=true
|
||||
-----------------------
|
||||
|
||||
This request is used to initially create a new repository. The server
|
||||
responds with "200 OK" if the repository structure was created
|
||||
successfully or already exists, otherwise an error is returned.
|
||||
|
||||
DELETE {path}
|
||||
-------------
|
||||
|
||||
Deletes the repository on the server side. The server responds with "200
|
||||
OK" if the repository was successfully removed. If this function is not
|
||||
implemented the server returns "501 Not Implemented", if this it is
|
||||
denied by the server it returns "403 Forbidden".
|
||||
|
||||
HEAD {path}/config
|
||||
------------------
|
||||
|
||||
Returns "200 OK" if the repository has a configuration, an HTTP error
|
||||
otherwise.
|
||||
|
||||
GET {path}/config
|
||||
-----------------
|
||||
|
||||
Returns the content of the configuration file if the repository has a
|
||||
configuration, an HTTP error otherwise.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/config
|
||||
------------------
|
||||
|
||||
Returns "200 OK" if the configuration of the request body has been
|
||||
saved, an HTTP error otherwise.
|
||||
|
||||
GET {path}/{type}/
|
||||
------------------
|
||||
|
||||
Returns a JSON array containing the names of all the blobs stored for a
|
||||
given type.
|
||||
|
||||
Response format: JSON
|
||||
|
||||
HEAD {path}/{type}/{name}
|
||||
-------------------------
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type is stored in
|
||||
the repository, "404 not found" otherwise. If the blob exists, the HTTP
|
||||
header ``Content-Length`` is set to the file size.
|
||||
|
||||
GET {path}/{type}/{name}
|
||||
------------------------
|
||||
|
||||
Returns the content of the blob with the given name and type if it is
|
||||
stored in the repository, "404 not found" otherwise.
|
||||
|
||||
If the request specifies a partial read with a Range header field, then
|
||||
the status code of the response is 206 instead of 200 and the response
|
||||
only contains the specified range.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/{type}/{name}
|
||||
-------------------------
|
||||
|
||||
Saves the content of the request body as a blob with the given name and
type and returns "200 OK" on success, an HTTP error otherwise.
|
||||
|
||||
Request format: binary/octet-stream
|
||||
|
||||
DELETE {path}/{type}/{name}
|
||||
---------------------------
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type has been
|
||||
deleted from the repository, an HTTP error otherwise.
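
To make the API above concrete, here is a minimal Go client sketch; the server URL is a placeholder, no authentication is shown, and the repository path is the default ``/``.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    const server = "http://localhost:8000" // placeholder REST server, repository path "/"

    func main() {
        // HEAD {path}/config - does the repository have a configuration?
        resp, err := http.Head(server + "/config")
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println("config:", resp.Status)

        // GET {path}/{type}/ - JSON array with the names of all blobs of a type.
        resp, err = http.Get(server + "/snapshots/")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var names []string
        if err := json.NewDecoder(resp.Body).Decode(&names); err != nil {
            panic(err)
        }
        for _, n := range names {
            fmt.Println("snapshot file:", n)
        }
    }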
|
20
doc/talks.rst
Normal file
@@ -0,0 +1,20 @@
|
||||
Talks
|
||||
=====
|
||||
|
||||
The following talks will be or have been given about restic:
|
||||
|
||||
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
|
||||
Brussels, Belgium
|
||||
- 2016-01-29: `restic - Backups mal
|
||||
richtig <https://media.ccc.de/v/c4.openchaos.2016.01.restic>`__:
|
||||
Public lecture in German at `CCC Cologne
|
||||
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
|
||||
- 2015-08-23: `A Solution to the Backup
|
||||
Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
|
||||
Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
|
||||
- 2015-02-01: `Lightning Talk at FOSDEM
|
||||
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
|
||||
short introduction (with slightly outdated command line)
|
||||
- 2015-01-27: `Talk about restic at CCC
|
||||
Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
|
||||
(in German)
|
255
doc/tutorial_aws_s3.rst
Normal file
@@ -0,0 +1,255 @@
|
||||
Setting up restic with Amazon S3
|
||||
================================
|
||||
|
||||
Preface
|
||||
-------
|
||||
|
||||
This tutorial will show you how to use restic with AWS S3. It will show you how
|
||||
to navigate the AWS web interface, create an S3 bucket, create a user with
|
||||
access to only this bucket, and finally how to connect restic to this bucket.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
You should already have a ``restic`` binary available on your system that you can
|
||||
run. Furthermore, you should also have an account with
|
||||
`AWS <https://aws.amazon.com/>`__. You will likely need to provide credit card
|
||||
details for billing purposes, even if you use their
|
||||
`free-tier <https://aws.amazon.com/free/>`__.
|
||||
|
||||
|
||||
Logging into AWS
|
||||
----------------
|
||||
|
||||
Point your browser to
|
||||
https://console.aws.amazon.com
|
||||
and log in using your AWS account. You will be presented with the AWS homepage:
|
||||
|
||||
.. image:: images/aws_s3/01_aws_start.png
|
||||
:alt: AWS Homepage
|
||||
|
||||
By using the "Services" button in the upper left corder, a menu of all services
|
||||
provided by AWS can be opened:
|
||||
|
||||
.. image:: images/aws_s3/02_aws_menu.png
|
||||
:alt: AWS Services Menu
|
||||
|
||||
For this tutorial, the Simple Storage Service (S3), as well as Identity and
|
||||
Access Management (IAM) are relevant.
|
||||
|
||||
|
||||
Creating the bucket
|
||||
-------------------
|
||||
|
||||
First, a bucket to store your backups in must be created. Using the "Services"
|
||||
menu, navigate to S3. In case you already have some S3 buckets, you will see a
|
||||
list of them here:
|
||||
|
||||
.. image:: images/aws_s3/03_buckets_list_before.png
|
||||
:alt: List of S3 Buckets
|
||||
|
||||
Click the "Create bucket" button and choose a name and region for your new
|
||||
bucket. For the purpose of this tutorial, the bucket will be named
|
||||
``restic-demo`` and reside in Frankfurt. Because the bucket name space is
|
||||
shared among all AWS users, the name ``restic-demo`` may not be available to
|
||||
you. Be creative and choose a unique bucket name.
|
||||
|
||||
.. image:: images/aws_s3/04_bucket_create_start.png
|
||||
:alt: Create a Bucket
|
||||
|
||||
It is not necessary to configure any special properties or permissions of the
|
||||
bucket just yet. Therefore, just finish the wizard without making any further
|
||||
changes:
|
||||
|
||||
.. image:: images/aws_s3/05_bucket_create_review.png
|
||||
:alt: Review Bucket Creation
|
||||
|
||||
The newly created ``restic-demo`` bucket will now appear on the list of S3
|
||||
buckets:
|
||||
|
||||
.. image:: images/aws_s3/06_buckets_list_after.png
|
||||
:alt: List With New Bucket
|
||||
|
||||
Creating a user
|
||||
---------------
|
||||
|
||||
Use the "Services" menu of the AWS web interface to navigate to IAM. This will
|
||||
bring you to the IAM homepage. To create a new user, click on the "Users" menu
|
||||
entry on the left:
|
||||
|
||||
.. image:: images/aws_s3/07_iam_start.png
|
||||
:alt: IAM Home Page
|
||||
|
||||
In case you have already set up users with IAM before, you will see a list of
|
||||
them here. Use the "Add user" button at the top to create a new user:
|
||||
|
||||
.. image:: images/aws_s3/08_user_list.png
|
||||
:alt: IAM User List
|
||||
|
||||
For this tutorial, the new user will be named ``restic-demo-user``. Feel free to
|
||||
choose your own name that best fits your needs. This user will only ever access
|
||||
AWS through the ``restic`` program and not through the web interface. Therefore,
|
||||
"Programmatic access" is selected for "Access type":
|
||||
|
||||
.. image:: images/aws_s3/09_user_name.png
|
||||
:alt: Choose User Name and Access Type
|
||||
|
||||
During the next step, permissions can be assigned to the new user. To use this
|
||||
user with restic, it only needs access to the ``restic-demo`` bucket. Select
|
||||
"Attach existing policies directly", which will bring up a list of pre-defined
|
||||
policies below. Afterwards, click the "Create policy" button to create a custom
|
||||
policy:
|
||||
|
||||
.. image:: images/aws_s3/10_user_pre_policy.png
|
||||
:alt: Assign a Policy
|
||||
|
||||
A new browser window or tab will open with the policy wizard. In Amazon IAM,
|
||||
policies are defined as JSON documents. For this tutorial, the "Policy
|
||||
Generator" will be used to generate a policy file using a web interface:
|
||||
|
||||
.. image:: images/aws_s3/11_policy_start.png
|
||||
:alt: Create a New Policy
|
||||
|
||||
After invoking the policy generator, you will be presented with a user
|
||||
interface to generate individual permission statements. For restic to work, two
|
||||
such statements must be created. The first statement is set up as follows:
|
||||
|
||||
.. code::
|
||||
|
||||
Effect: Allow
|
||||
Service: Amazon S3
|
||||
Actions: DeleteObject, GetObject, PutObject
|
||||
Resource: arn:aws:s3:::restic-demo/*
|
||||
|
||||
This statement allows restic to create, read and delete objects inside the S3
|
||||
bucket named ``restic-demo``. Adjust the bucket's name to the name of the bucket
|
||||
you created earlier. Using the "Add Statement" button, this statement can be
|
||||
saved. Now a second statement is created:
|
||||
|
||||
.. code::
|
||||
|
||||
Effect: Allow
|
||||
Service: Amazon S3
|
||||
Actions: ListBucket
|
||||
Resource: arn:aws:s3:::restic-demo
|
||||
|
||||
Again, substitute ``restic-demo`` with the actual name of your bucket. Note that,
|
||||
unlike before, there is no ``/*`` after the bucket name. This statement allows
|
||||
restic to list the objects stored in the ``restic-demo`` bucket. Again, use "Add
|
||||
Statement" to save this statement. The policy creator interface should now
|
||||
look as follows:
|
||||
|
||||
.. image:: images/aws_s3/12_policy_permissions_done.png
|
||||
:alt: Policy Creator With Two Statements
|
||||
|
||||
Continue to the next step and enter a name and description for this policy. For
|
||||
this tutorial, the policy will be named ``restic-demo-policy``. In this step you
|
||||
can also examine the JSON document created by the policy generator. Click
|
||||
"Create Policy" to finish the process:
|
||||
|
||||
.. image:: images/aws_s3/13_policy_review.png
|
||||
:alt: Policy Review
|
||||
|
||||
Go back to the browser window or tab where you were previously creating the new
|
||||
user. Click the button labeled "Refresh" above the list of policies to make
|
||||
sure the newly created policy is available to you. Afterwards, use the search
|
||||
function to search for the ``restic-demo-policy``. Select this policy using the
|
||||
checkbox on the left. Then, continue to the next step.
|
||||
|
||||
.. image:: images/aws_s3/14_user_attach_policy.png
|
||||
:alt: Attach Policy to User
|
||||
|
||||
The next page will present an overview of the user account that is about to be
|
||||
created. If everything looks good, click "Create user" to complete the process:
|
||||
|
||||
.. image:: images/aws_s3/15_user_review.png
|
||||
:alt: User Creation Review
|
||||
|
||||
After the user has been created, its access credentials will be displayed. They
|
||||
consist of the "Access key ID" (think user name), and the "Secret access key"
|
||||
(think password). Copy these down to a safe place.
|
||||
|
||||
.. image:: images/aws_s3/16_user_created.png
|
||||
:alt: User Credentials
|
||||
|
||||
You have now completed the configuration in AWS. Feel free to close your web
|
||||
browser now.
|
||||
|
||||
|
||||
Initializing the restic repository
|
||||
----------------------------------
|
||||
|
||||
Open a terminal and make sure you have the ``restic`` binary ready. First, choose
|
||||
a password to encrypt your backups with. In this tutorial, ``apg`` is used for
|
||||
this purpose:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ apg -a 1 -m 32 -n 1 -M NCL
|
||||
I9n7G7G0ZpDWA3GOcJbIuwQCGvGUBkU5
|
||||
|
||||
Note this password somewhere safe along with your AWS credentials. Next, the
|
||||
configuration of restic will be placed into environment variables. This will
|
||||
include sensitive information, such as your AWS secret and repository password.
|
||||
Therefore, make sure the next commands **do not** end up in your shell's
|
||||
history file. Adjust the contents of the environment variables to fit your
|
||||
bucket's name and your user's API credentials.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ unset HISTFILE
|
||||
$ export RESTIC_REPOSITORY="s3:https://s3.amazonaws.com/restic-demo"
|
||||
$ export AWS_ACCESS_KEY_ID="AKIAJAJSLTZCAZ4SRI5Q"
|
||||
$ export AWS_SECRET_ACCESS_KEY="LaJtZPoVvGbXsaD2LsxvJZF/7LRi4FhT0TK4gDQq"
|
||||
$ export RESTIC_PASSWORD="I9n7G7G0ZpDWA3GOcJbIuwQCGvGUBkU5"
|
||||
|
||||
|
||||
After the environment is set up, restic may be called to initialize the
|
||||
repository:
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./restic init
|
||||
created restic backend b5c661a86a at s3:https://s3.amazonaws.com/restic-demo
|
||||
|
||||
Please note that knowledge of your password is required to access
|
||||
the repository. Losing your password means that your data is
|
||||
irrecoverably lost.
|
||||
|
||||
restic is now ready to be used with AWS S3. Try to create a backup:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ dd if=/dev/urandom bs=1M count=10 of=test.bin
|
||||
10+0 records in
|
||||
10+0 records out
|
||||
10485760 bytes (10 MB, 10 MiB) copied, 0,0891322 s, 118 MB/s
|
||||
|
||||
$ ./restic backup test.bin
|
||||
scan [/home/philip/restic-demo/test.bin]
|
||||
scanned 0 directories, 1 files in 0:00
|
||||
[0:04] 100.00% 2.500 MiB/s 10.000 MiB / 10.000 MiB 1 / 1 items ... ETA 0:00
|
||||
duration: 0:04, 2.47MiB/s
|
||||
snapshot 10fdbace saved
|
||||
|
||||
$ ./restic snapshots
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
10fdbace 2017-03-26 16:41:50 blackbox /home/philip/restic-demo/test.bin
|
||||
|
||||
A snapshot was created and stored in the S3 bucket. This snapshot may now be
|
||||
restored:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mkdir restore
|
||||
|
||||
$ ./restic restore 10fdbace --target restore
|
||||
restoring <Snapshot 10fdbace of [/home/philip/restic-demo/test.bin] at 2017-03-26 16:41:50.201418102 +0200 CEST by philip@blackbox> to restore
|
||||
|
||||
$ ls restore/
|
||||
test.bin
|
||||
|
||||
The snapshot was successfully restored. This concludes the tutorial.
|
||||
|
5
doc/tutorials.rst
Normal file
@@ -0,0 +1,5 @@
|
||||
==========
|
||||
Tutorials
|
||||
==========
|
||||
|
||||
.. include:: tutorial_aws_s3.rst
|
11
mkdocs.yml
@@ -1,11 +0,0 @@
|
||||
site_name: Documentation for restic
|
||||
theme: readthedocs
|
||||
markdown_extensions:
|
||||
- codehilite:
|
||||
extra_css:
|
||||
- code.css
|
||||
docs_dir: doc
|
||||
pages:
|
||||
- Getting Started: index.md
|
||||
- User Manual: Manual.md
|
||||
- restic Design Document: Design.md
|
@@ -9,12 +9,10 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
@@ -26,19 +24,6 @@ var ForbiddenImports = map[string]bool{
|
||||
}
|
||||
|
||||
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
|
||||
var minioServer = flag.String("minio", "", "path to the minio server binary")
|
||||
var debug = flag.Bool("debug", false, "output debug messages")
|
||||
|
||||
var minioServerEnv = map[string]string{
|
||||
"MINIO_ACCESS_KEY": "KEBIYDZ87HCIH5D17YCN",
|
||||
"MINIO_SECRET_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
|
||||
}
|
||||
|
||||
var minioEnv = map[string]string{
|
||||
"RESTIC_TEST_S3_SERVER": "http://127.0.0.1:9000",
|
||||
"AWS_ACCESS_KEY_ID": "KEBIYDZ87HCIH5D17YCN",
|
||||
"AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Parse()
|
||||
@@ -54,22 +39,11 @@ type CIEnvironment interface {
|
||||
// TravisEnvironment is the environment in which Travis tests run.
|
||||
type TravisEnvironment struct {
|
||||
goxOSArch []string
|
||||
minio string
|
||||
|
||||
minioSrv *Background
|
||||
minioTempdir string
|
||||
|
||||
env map[string]string
|
||||
env map[string]string
|
||||
}
|
||||
|
||||
func (env *TravisEnvironment) getMinio() error {
|
||||
if *minioServer != "" {
|
||||
msg("using minio server at %q\n", *minioServer)
|
||||
env.minio = *minioServer
|
||||
return nil
|
||||
}
|
||||
|
||||
tempfile, err := ioutil.TempFile("", "minio-server-")
|
||||
tempfile, err := os.Create(filepath.Join(os.Getenv("GOPATH"), "bin", "minio"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create tempfile for minio download failed: %v\n", err)
|
||||
}
|
||||
@@ -104,41 +78,6 @@ func (env *TravisEnvironment) getMinio() error {
|
||||
}
|
||||
|
||||
msg("downloaded minio server to %v\n", tempfile.Name())
|
||||
env.minio = tempfile.Name()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *TravisEnvironment) runMinio() error {
|
||||
if env.minio == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// start minio server
|
||||
msg("starting minio server at %s", env.minio)
|
||||
|
||||
dir, err := ioutil.TempDir("", "minio-root")
|
||||
if err != nil {
|
||||
return fmt.Errorf("running minio server failed: %v", err)
|
||||
}
|
||||
|
||||
env.minioSrv, err = StartBackgroundCommand(minioServerEnv, env.minio,
|
||||
"server",
|
||||
"--address", "127.0.0.1:9000",
|
||||
dir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running minio server: %v", err)
|
||||
}
|
||||
|
||||
// go func() {
|
||||
// time.Sleep(300 * time.Millisecond)
|
||||
// env.minioSrv.Cmd.Process.Kill()
|
||||
// }()
|
||||
|
||||
for k, v := range minioEnv {
|
||||
env.env[k] = v
|
||||
}
|
||||
|
||||
env.minioTempdir = dir
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -148,10 +87,14 @@ func (env *TravisEnvironment) Prepare() error {
|
||||
|
||||
msg("preparing environment for Travis CI\n")
|
||||
|
||||
for _, pkg := range []string{
|
||||
pkgs := []string{
|
||||
"golang.org/x/tools/cmd/cover",
|
||||
"github.com/pierrre/gotestcover",
|
||||
} {
|
||||
"github.com/NebulousLabs/glyphcheck",
|
||||
"github.com/restic/rest-server/cmd/rest-server",
|
||||
}
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
err := run("go", "get", pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -161,11 +104,8 @@ func (env *TravisEnvironment) Prepare() error {
|
||||
if err := env.getMinio(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := env.runMinio(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if *runCrossCompile && !(runtime.Version() < "go1.7") {
|
||||
if *runCrossCompile {
|
||||
// only test cross compilation on linux with Travis
|
||||
if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
|
||||
return err
|
||||
@@ -177,25 +117,13 @@ func (env *TravisEnvironment) Prepare() error {
|
||||
"darwin/386", "darwin/amd64",
|
||||
"freebsd/386", "freebsd/amd64",
|
||||
"openbsd/386", "openbsd/amd64",
|
||||
}
|
||||
if !strings.HasPrefix(runtime.Version(), "go1.3") {
|
||||
env.goxOSArch = append(env.goxOSArch,
|
||||
"linux/arm", "freebsd/arm")
|
||||
"linux/arm", "freebsd/arm",
|
||||
}
|
||||
} else {
|
||||
env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
|
||||
}
|
||||
|
||||
msg("gox: OS/ARCH %v\n", env.goxOSArch)
|
||||
|
||||
if runtime.Version() < "go1.5" {
|
||||
err := run("gox", "-build-toolchain",
|
||||
"-osarch", strings.Join(env.goxOSArch, " "))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -204,107 +132,15 @@ func (env *TravisEnvironment) Prepare() error {
|
||||
// Teardown stops backend services and cleans the environment again.
|
||||
func (env *TravisEnvironment) Teardown() error {
|
||||
msg("run travis teardown\n")
|
||||
if env.minioSrv != nil {
|
||||
msg("stopping minio server\n")
|
||||
|
||||
if env.minioSrv.Cmd.ProcessState == nil {
|
||||
err := env.minioSrv.Cmd.Process.Kill()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error killing minio server process: %v", err)
|
||||
}
|
||||
} else {
|
||||
result := <-env.minioSrv.Result
|
||||
if result.Error != nil {
|
||||
msg("minio server returned error: %v\n", result.Error)
|
||||
msg("stdout: %s\n", result.Stdout)
|
||||
msg("stderr: %s\n", result.Stderr)
|
||||
}
|
||||
}
|
||||
|
||||
err := os.RemoveAll(env.minioTempdir)
|
||||
if err != nil {
|
||||
msg("error removing minio tempdir %v: %v\n", env.minioTempdir, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func goVersionAtLeast151() bool {
|
||||
v := runtime.Version()
|
||||
|
||||
if match, _ := regexp.MatchString(`^go1\.[0-4]`, v); match {
|
||||
return false
|
||||
}
|
||||
|
||||
if v == "go1.5" {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Background is a program running in the background.
|
||||
type Background struct {
|
||||
Cmd *exec.Cmd
|
||||
Result chan Result
|
||||
}
|
||||
|
||||
// Result is the result of a program that ran in the background.
|
||||
type Result struct {
|
||||
Stdout, Stderr string
|
||||
Error error
|
||||
}
|
||||
|
||||
// StartBackgroundCommand runs a program in the background.
|
||||
func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (*Background, error) {
|
||||
msg("running background command %v %v\n", cmd, args)
|
||||
b := Background{
|
||||
Result: make(chan Result, 1),
|
||||
}
|
||||
|
||||
stdout := bytes.NewBuffer(nil)
|
||||
stderr := bytes.NewBuffer(nil)
|
||||
|
||||
c := exec.Command(cmd, args...)
|
||||
c.Stdout = stdout
|
||||
c.Stderr = stderr
|
||||
|
||||
if *debug {
|
||||
c.Stdout = io.MultiWriter(c.Stdout, os.Stdout)
|
||||
c.Stderr = io.MultiWriter(c.Stderr, os.Stderr)
|
||||
}
|
||||
c.Env = updateEnv(os.Environ(), env)
|
||||
|
||||
b.Cmd = c
|
||||
|
||||
err := c.Start()
|
||||
if err != nil {
|
||||
msg("error starting background job %v: %v\n", cmd, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := b.Cmd.Wait()
|
||||
msg("background job %v returned: %v\n", cmd, err)
|
||||
msg("stdout: %s\n", stdout.Bytes())
|
||||
msg("stderr: %s\n", stderr.Bytes())
|
||||
b.Result <- Result{
|
||||
Stdout: string(stdout.Bytes()),
|
||||
Stderr: string(stderr.Bytes()),
|
||||
Error: err,
|
||||
}
|
||||
}()
|
||||
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
// RunTests starts the tests for Travis.
|
||||
func (env *TravisEnvironment) RunTests() error {
|
||||
// do not run fuse tests on darwin
|
||||
if runtime.GOOS == "darwin" {
|
||||
msg("skip fuse integration tests on %v\n", runtime.GOOS)
|
||||
os.Setenv("RESTIC_TEST_FUSE", "0")
|
||||
_ = os.Setenv("RESTIC_TEST_FUSE", "0")
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
@@ -314,7 +150,37 @@ func (env *TravisEnvironment) RunTests() error {
|
||||
|
||||
env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")
|
||||
|
||||
if *runCrossCompile && !(runtime.Version() < "go1.7") {
|
||||
// ensure that the following tests cannot be silently skipped on Travis
|
||||
ensureTests := []string{
|
||||
"restic/backend/rest.TestBackendREST",
|
||||
"restic/backend/sftp.TestBackendSFTP",
|
||||
"restic/backend/s3.TestBackendMinio",
|
||||
}
|
||||
|
||||
// if the test s3 repository is available, make sure that the test is not skipped
|
||||
if os.Getenv("RESTIC_TEST_S3_REPOSITORY") != "" {
|
||||
ensureTests = append(ensureTests, "restic/backend/s3.TestBackendS3")
|
||||
} else {
|
||||
msg("S3 repository not available\n")
|
||||
}
|
||||
|
||||
// if the test swift service is available, make sure that the test is not skipped
|
||||
if os.Getenv("RESTIC_TEST_SWIFT") != "" {
|
||||
ensureTests = append(ensureTests, "restic/backend/swift.TestBackendSwift")
|
||||
} else {
|
||||
msg("Swift service not available\n")
|
||||
}
|
||||
|
||||
// if the test b2 repository is available, make sure that the test is not skipped
|
||||
if os.Getenv("RESTIC_TEST_B2_REPOSITORY") != "" {
|
||||
ensureTests = append(ensureTests, "restic/backend/b2.TestBackendB2")
|
||||
} else {
|
||||
msg("B2 repository not available\n")
|
||||
}
|
||||
|
||||
env.env["RESTIC_TEST_DISALLOW_SKIP"] = strings.Join(ensureTests, ",")
|
||||
|
||||
if *runCrossCompile {
|
||||
// compile for all target architectures with tags
|
||||
for _, tags := range []string{"release", "debug"} {
|
||||
err := runWithEnv(env.env, "gox", "-verbose",
|
||||
@@ -343,6 +209,10 @@ func (env *TravisEnvironment) RunTests() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = runGlyphcheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
deps, err := findImports()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -387,16 +257,16 @@ func (env *AppveyorEnvironment) Teardown() error {
|
||||
// findGoFiles returns a list of go source code file names below dir.
|
||||
func findGoFiles(dir string) (list []string, err error) {
|
||||
err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
|
||||
if filepath.Base(name) == "vendor" {
|
||||
relpath, err := filepath.Rel(dir, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if relpath == "vendor" || relpath == "pkg" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
if filepath.Ext(name) == ".go" {
|
||||
relpath, err := filepath.Rel(dir, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if filepath.Ext(relpath) == ".go" {
|
||||
list = append(list, relpath)
|
||||
}
|
||||
|
||||
@@ -498,6 +368,18 @@ func runGofmt() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runGlyphcheck() error {
|
||||
cmd := exec.Command("glyphcheck", "./...")
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
buf, err := cmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running glyphcheck: %v\noutput: %s\n", err, buf)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func run(command string, args ...string) error {
|
||||
msg("run %v %v\n", command, strings.Join(args, " "))
|
||||
return runWithEnv(nil, command, args...)
|
||||
|
36
src/cmds/restic/cmd_autocomplete.go
Normal file
@@ -0,0 +1,36 @@
package main

import (
    "github.com/spf13/cobra"
)

var autocompleteTarget string

var cmdAutocomplete = &cobra.Command{
    Use:   "autocomplete",
    Short: "generate shell autocompletion script",
    Long: `The "autocomplete" command generates a shell autocompletion script.

NOTE: The current version supports Bash only.
This should work for *nix systems with Bash installed.

By default, the file is written directly to /etc/bash_completion.d
for convenience, and the command may need superuser rights, e.g.:

$ sudo restic autocomplete`,

    RunE: func(cmd *cobra.Command, args []string) error {
        if err := cmdRoot.GenBashCompletionFile(autocompleteTarget); err != nil {
            return err
        }
        return nil
    },
}

func init() {
    cmdRoot.AddCommand(cmdAutocomplete)

    cmdAutocomplete.Flags().StringVarP(&autocompleteTarget, "completionfile", "", "/etc/bash_completion.d/restic.sh", "autocompletion file")
    // For bash-completion
    cmdAutocomplete.Flags().SetAnnotation("completionfile", cobra.BashCompFilenameExt, []string{})
}
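For reference (an illustration, not part of the diff above), the new command would
typically be run with superuser rights so it can write to the default completion
directory, or pointed somewhere else via the ``--completionfile`` flag it registers;
the alternative path below is only an example:

.. code-block:: console

    $ sudo ./restic autocomplete
    $ ./restic autocomplete --completionfile ~/.restic-completion.sh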
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@@ -44,7 +45,7 @@ type BackupOptions struct {
|
||||
Parent string
|
||||
Force bool
|
||||
Excludes []string
|
||||
ExcludeFile string
|
||||
ExcludeFiles []string
|
||||
ExcludeOtherFS bool
|
||||
Stdin bool
|
||||
StdinFilename string
|
||||
@@ -68,7 +69,7 @@ func init() {
|
||||
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
|
||||
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
|
||||
f.StringSliceVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
|
||||
f.StringVar(&backupOptions.ExcludeFile, "exclude-file", "", "read exclude patterns from a file")
|
||||
f.StringSliceVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
|
||||
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
|
||||
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
|
||||
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
|
||||
@@ -220,8 +221,8 @@ func filterExisting(items []string) (result []string, err error) {
|
||||
|
||||
// gatherDevices returns the set of unique device ids of the files and/or
|
||||
// directory paths listed in "items".
|
||||
func gatherDevices(items []string) (deviceMap map[uint64]struct{}, err error) {
|
||||
deviceMap = make(map[uint64]struct{})
|
||||
func gatherDevices(items []string) (deviceMap map[string]uint64, err error) {
|
||||
deviceMap = make(map[string]uint64)
|
||||
for _, item := range items {
|
||||
fi, err := fs.Lstat(item)
|
||||
if err != nil {
|
||||
@@ -231,7 +232,7 @@ func gatherDevices(items []string) (deviceMap map[uint64]struct{}, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deviceMap[id] = struct{}{}
|
||||
deviceMap[item] = id
|
||||
}
|
||||
if len(deviceMap) == 0 {
|
||||
return nil, errors.New("zero allowed devices")
|
||||
@@ -263,7 +264,7 @@ func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string)
|
||||
return err
|
||||
}
|
||||
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -274,7 +275,7 @@ func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string)
|
||||
Hostname: opts.Hostname,
|
||||
}
|
||||
|
||||
_, id, err := r.Archive(opts.StdinFilename, os.Stdin, newArchiveStdinProgress(gopts))
|
||||
_, id, err := r.Archive(context.TODO(), opts.StdinFilename, os.Stdin, newArchiveStdinProgress(gopts))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -352,7 +353,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
|
||||
// allowed devices
|
||||
var allowedDevs map[uint64]struct{}
|
||||
var allowedDevs map[string]uint64
|
||||
if opts.ExcludeOtherFS {
|
||||
allowedDevs, err = gatherDevices(target)
|
||||
if err != nil {
|
||||
@@ -372,7 +373,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -391,7 +392,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
// Find last snapshot to set it as parent, if not already set
|
||||
if !opts.Force && parentSnapshotID == nil {
|
||||
id, err := restic.FindLatestSnapshot(repo, target, opts.Tags, opts.Hostname)
|
||||
id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, opts.Tags, opts.Hostname)
|
||||
if err == nil {
|
||||
parentSnapshotID = &id
|
||||
} else if err != restic.ErrNoSnapshotFound {
|
||||
@@ -406,17 +407,28 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
Verbosef("scan %v\n", target)
|
||||
|
||||
// add patterns from file
|
||||
if opts.ExcludeFile != "" {
|
||||
file, err := fs.Open(opts.ExcludeFile)
|
||||
if err != nil {
|
||||
Warnf("error reading exclude patterns: %v", err)
|
||||
return nil
|
||||
}
|
||||
if len(opts.ExcludeFiles) > 0 {
|
||||
for _, filename := range opts.ExcludeFiles {
|
||||
file, err := fs.Open(filename)
|
||||
if err != nil {
|
||||
Warnf("error reading exclude patterns: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
|
||||
// ignore empty lines
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// strip comments
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.HasPrefix(line, "#") {
|
||||
line = os.ExpandEnv(line)
|
||||
opts.Excludes = append(opts.Excludes, line)
|
||||
}
|
||||
@@ -444,13 +456,24 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
// errored out earlier. If it still does that's a reason to panic.
|
||||
panic(err)
|
||||
}
|
||||
_, found := allowedDevs[id]
|
||||
if !found {
|
||||
debug.Log("path %q on disallowed device %d", item, id)
|
||||
return false
|
||||
|
||||
for dir := item; dir != ""; dir = filepath.Dir(dir) {
|
||||
debug.Log("item %v, test dir %v", item, dir)
|
||||
|
||||
allowedID, ok := allowedDevs[dir]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if allowedID != id {
|
||||
debug.Log("path %q on disallowed device %d", item, id)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
panic(fmt.Sprintf("item %v, device id %v not found, allowedDevs: %v", item, id, allowedDevs))
|
||||
}
|
||||
|
||||
stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
|
||||
@@ -467,7 +490,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
Warnf("%s\rwarning for %s: %v\n", ClearLine(), dir, err)
|
||||
}
|
||||
|
||||
_, id, err := arch.Snapshot(newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID)
|
||||
_, id, err := arch.Snapshot(context.TODO(), newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -73,7 +74,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
fmt.Println(string(buf))
|
||||
return nil
|
||||
case "index":
|
||||
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
|
||||
buf, err := repo.LoadAndDecrypt(context.TODO(), restic.IndexFile, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -83,7 +84,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
|
||||
case "snapshot":
|
||||
sn := &restic.Snapshot{}
|
||||
err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
|
||||
err = repo.LoadJSONUnpacked(context.TODO(), restic.SnapshotFile, id, sn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -98,7 +99,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
return nil
|
||||
case "key":
|
||||
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
|
||||
buf, err := backend.LoadAll(repo.Backend(), h)
|
||||
buf, err := backend.LoadAll(context.TODO(), repo.Backend(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -125,7 +126,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
fmt.Println(string(buf))
|
||||
return nil
|
||||
case "lock":
|
||||
lock, err := restic.LoadLock(repo, id)
|
||||
lock, err := restic.LoadLock(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,7 +142,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
|
||||
// load index, handle all the other types
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -149,7 +150,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
switch tpe {
|
||||
case "pack":
|
||||
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
|
||||
buf, err := backend.LoadAll(repo.Backend(), h)
|
||||
buf, err := backend.LoadAll(context.TODO(), repo.Backend(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -171,7 +172,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
blob := list[0]
|
||||
|
||||
buf := make([]byte, blob.Length)
|
||||
n, err := repo.LoadBlob(t, id, buf)
|
||||
n, err := repo.LoadBlob(context.TODO(), t, id, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
@@ -92,7 +93,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
chkr := checker.New(repo)
|
||||
|
||||
Verbosef("Load indexes\n")
|
||||
hints, errs := chkr.LoadIndex()
|
||||
hints, errs := chkr.LoadIndex(context.TODO())
|
||||
|
||||
dupFound := false
|
||||
for _, hint := range hints {
|
||||
@@ -113,14 +114,11 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("LoadIndex returned errors")
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
errorsFound := false
|
||||
errChan := make(chan error)
|
||||
|
||||
Verbosef("Check all packs\n")
|
||||
go chkr.Packs(errChan, done)
|
||||
go chkr.Packs(context.TODO(), errChan)
|
||||
|
||||
for err := range errChan {
|
||||
errorsFound = true
|
||||
@@ -129,7 +127,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
Verbosef("Check snapshots, trees and blobs\n")
|
||||
errChan = make(chan error)
|
||||
go chkr.Structure(errChan, done)
|
||||
go chkr.Structure(context.TODO(), errChan)
|
||||
|
||||
for err := range errChan {
|
||||
errorsFound = true
|
||||
@@ -156,7 +154,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
|
||||
errChan := make(chan error)
|
||||
|
||||
go chkr.ReadData(p, errChan, done)
|
||||
go chkr.ReadData(context.TODO(), p, errChan)
|
||||
|
||||
for err := range errChan {
|
||||
errorsFound = true
|
||||
|
@@ -1,8 +1,9 @@
|
||||
// +build debug
|
||||
// xbuild debug
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -44,11 +45,8 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error {
|
||||
}
|
||||
|
||||
func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
for id := range repo.List(restic.SnapshotFile, done) {
|
||||
snapshot, err := restic.LoadSnapshot(repo, id)
|
||||
for id := range repo.List(context.TODO(), restic.SnapshotFile) {
|
||||
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
|
||||
continue
|
||||
@@ -83,15 +81,12 @@ type Blob struct {
|
||||
}
|
||||
|
||||
func printPacks(repo *repository.Repository, wr io.Writer) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
|
||||
f := func(ctx context.Context, job worker.Job) (interface{}, error) {
|
||||
name := job.Data.(string)
|
||||
|
||||
h := restic.Handle{Type: restic.DataFile, Name: name}
|
||||
|
||||
blobInfo, err := repo.Backend().Stat(h)
|
||||
blobInfo, err := repo.Backend().Stat(ctx, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -106,10 +101,10 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
|
||||
|
||||
jobCh := make(chan worker.Job)
|
||||
resCh := make(chan worker.Job)
|
||||
wp := worker.New(dumpPackWorkers, f, jobCh, resCh)
|
||||
wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
|
||||
|
||||
go func() {
|
||||
for name := range repo.Backend().List(restic.DataFile, done) {
|
||||
for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
|
||||
jobCh <- worker.Job{Data: name}
|
||||
}
|
||||
close(jobCh)
|
||||
@@ -146,13 +141,10 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
|
||||
}
|
||||
|
||||
func dumpIndexes(repo restic.Repository) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
for id := range repo.List(restic.IndexFile, done) {
|
||||
for id := range repo.List(context.TODO(), restic.IndexFile) {
|
||||
fmt.Printf("index_id: %v\n", id)
|
||||
|
||||
idx, err := repository.LoadIndex(repo, id)
|
||||
idx, err := repository.LoadIndex(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -184,7 +176,7 @@ func runDump(gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -11,7 +12,6 @@ import (
|
||||
"restic"
|
||||
"restic/debug"
|
||||
"restic/errors"
|
||||
"restic/repository"
|
||||
)
|
||||
|
||||
var cmdFind = &cobra.Command{
|
||||
@@ -43,8 +43,8 @@ func init() {
|
||||
cmdRoot.AddCommand(cmdFind)
|
||||
|
||||
f := cmdFind.Flags()
|
||||
f.StringVarP(&findOptions.Oldest, "oldest", "o", "", "oldest modification date/time")
|
||||
f.StringVarP(&findOptions.Newest, "newest", "n", "", "newest modification date/time")
|
||||
f.StringVarP(&findOptions.Oldest, "oldest", "O", "", "oldest modification date/time")
|
||||
f.StringVarP(&findOptions.Newest, "newest", "N", "", "newest modification date/time")
|
||||
f.StringSliceVarP(&findOptions.Snapshots, "snapshot", "s", nil, "snapshot `id` to search in (can be given multiple times)")
|
||||
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
|
||||
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||
@@ -84,63 +84,163 @@ func parseTime(str string) (time.Time, error) {
|
||||
return time.Time{}, errors.Fatalf("unable to parse time: %q", str)
|
||||
}
|
||||
|
||||
func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, prefix string, snapshotID *string) error {
|
||||
debug.Log("checking tree %v\n", id)
|
||||
type statefulOutput struct {
|
||||
ListLong bool
|
||||
JSON bool
|
||||
inuse bool
|
||||
newsn *restic.Snapshot
|
||||
oldsn *restic.Snapshot
|
||||
hits int
|
||||
}
|
||||
|
||||
tree, err := repo.LoadTree(id)
|
||||
func (s *statefulOutput) PrintJSON(prefix string, node *restic.Node) {
|
||||
type findNode restic.Node
|
||||
b, err := json.Marshal(struct {
|
||||
// Add these attributes
|
||||
Path string `json:"path,omitempty"`
|
||||
Permissions string `json:"permissions,omitempty"`
|
||||
|
||||
*findNode
|
||||
|
||||
// Make the following attributes disappear
|
||||
Name byte `json:"name,omitempty"`
|
||||
Inode byte `json:"inode,omitempty"`
|
||||
ExtendedAttributes byte `json:"extended_attributes,omitempty"`
|
||||
Device byte `json:"device,omitempty"`
|
||||
Content byte `json:"content,omitempty"`
|
||||
Subtree byte `json:"subtree,omitempty"`
|
||||
}{
|
||||
Path: filepath.Join(prefix, node.Name),
|
||||
Permissions: node.Mode.String(),
|
||||
findNode: (*findNode)(node),
|
||||
})
|
||||
if err != nil {
|
||||
Warnf("Marshall failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
if !s.inuse {
|
||||
Printf("[")
|
||||
s.inuse = true
|
||||
}
|
||||
if s.newsn != s.oldsn {
|
||||
if s.oldsn != nil {
|
||||
Printf("],\"hits\":%d,\"snapshot\":%q},", s.hits, s.oldsn.ID())
|
||||
}
|
||||
Printf(`{"matches":[`)
|
||||
s.oldsn = s.newsn
|
||||
s.hits = 0
|
||||
}
|
||||
if s.hits > 0 {
|
||||
Printf(",")
|
||||
}
|
||||
Printf(string(b))
|
||||
s.hits++
|
||||
}
|
||||
|
||||
func (s *statefulOutput) PrintNormal(prefix string, node *restic.Node) {
|
||||
if s.newsn != s.oldsn {
|
||||
if s.oldsn != nil {
|
||||
Verbosef("\n")
|
||||
}
|
||||
s.oldsn = s.newsn
|
||||
Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID())
|
||||
}
|
||||
Printf(formatNode(prefix, node, s.ListLong) + "\n")
|
||||
}
|
||||
|
||||
func (s *statefulOutput) Print(prefix string, node *restic.Node) {
|
||||
if s.JSON {
|
||||
s.PrintJSON(prefix, node)
|
||||
} else {
|
||||
s.PrintNormal(prefix, node)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *statefulOutput) Finish() {
|
||||
if s.JSON {
|
||||
// do some finishing up
|
||||
if s.oldsn != nil {
|
||||
Printf("],\"hits\":%d,\"snapshot\":%q}", s.hits, s.oldsn.ID())
|
||||
}
|
||||
if s.inuse {
|
||||
Printf("]\n")
|
||||
} else {
|
||||
Printf("[]\n")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Finder bundles information needed to find a file or directory.
|
||||
type Finder struct {
|
||||
repo restic.Repository
|
||||
pat findPattern
|
||||
out statefulOutput
|
||||
notfound restic.IDSet
|
||||
}
|
||||
|
||||
func (f *Finder) findInTree(treeID restic.ID, prefix string) error {
|
||||
if f.notfound.Has(treeID) {
|
||||
debug.Log("%v skipping tree %v, has already been checked", prefix, treeID.Str())
|
||||
return nil
|
||||
}
|
||||
|
||||
debug.Log("%v checking tree %v\n", prefix, treeID.Str())
|
||||
|
||||
tree, err := f.repo.LoadTree(context.TODO(), treeID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, node := range tree.Nodes {
|
||||
debug.Log(" testing entry %q\n", node.Name)
|
||||
|
||||
name := node.Name
|
||||
if pat.ignoreCase {
|
||||
if f.pat.ignoreCase {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
m, err := filepath.Match(pat.pattern, name)
|
||||
m, err := filepath.Match(f.pat.pattern, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m {
|
||||
debug.Log(" pattern matches\n")
|
||||
if !pat.oldest.IsZero() && node.ModTime.Before(pat.oldest) {
|
||||
debug.Log(" ModTime is older than %s\n", pat.oldest)
|
||||
if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) {
|
||||
debug.Log(" ModTime is older than %s\n", f.pat.oldest)
|
||||
continue
|
||||
}
|
||||
|
||||
if !pat.newest.IsZero() && node.ModTime.After(pat.newest) {
|
||||
debug.Log(" ModTime is newer than %s\n", pat.newest)
|
||||
if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) {
|
||||
debug.Log(" ModTime is newer than %s\n", f.pat.newest)
|
||||
continue
|
||||
}
|
||||
|
||||
if snapshotID != nil {
|
||||
Verbosef("Found matching entries in snapshot %s\n", *snapshotID)
|
||||
snapshotID = nil
|
||||
}
|
||||
Printf(formatNode(prefix, node, findOptions.ListLong) + "\n")
|
||||
} else {
|
||||
debug.Log(" pattern does not match\n")
|
||||
debug.Log(" found match\n")
|
||||
found = true
|
||||
f.out.Print(prefix, node)
|
||||
}
|
||||
|
||||
if node.Type == "dir" {
|
||||
if err := findInTree(repo, pat, *node.Subtree, filepath.Join(prefix, node.Name), snapshotID); err != nil {
|
||||
if err := f.findInTree(*node.Subtree, filepath.Join(prefix, node.Name)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
f.notfound.Insert(treeID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func findInSnapshot(repo *repository.Repository, sn *restic.Snapshot, pat findPattern) error {
|
||||
debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), pat.oldest, pat.newest)
|
||||
func (f *Finder) findInSnapshot(sn *restic.Snapshot) error {
|
||||
debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), f.pat.oldest, f.pat.newest)
|
||||
|
||||
snapshotID := sn.ID().Str()
|
||||
if err := findInTree(repo, pat, *sn.Tree, string(filepath.Separator), &snapshotID); err != nil {
|
||||
f.out.newsn = sn
|
||||
if err := f.findInTree(*sn.Tree, string(filepath.Separator)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -183,17 +283,25 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err = repo.LoadIndex(); err != nil {
|
||||
if err = repo.LoadIndex(context.TODO()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
|
||||
f := &Finder{
|
||||
repo: repo,
|
||||
pat: pat,
|
||||
out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
|
||||
notfound: restic.NewIDSet(),
|
||||
}
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) {
|
||||
if err = findInSnapshot(repo, sn, pat); err != nil {
|
||||
if err = f.findInSnapshot(sn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
f.out.Finish()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@@ -66,6 +66,8 @@ func init() {
|
||||
|
||||
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
|
||||
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
|
||||
|
||||
f.SortFlags = false
|
||||
}
|
||||
|
||||
func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
@@ -88,14 +90,14 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
snapshotGroups := make(map[string]restic.Snapshots)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
|
||||
if len(args) > 0 {
|
||||
// When explicit snapshots args are given, remove them immediately.
|
||||
if !opts.DryRun {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
if err = repo.Backend().Remove(h); err != nil {
|
||||
if err = repo.Backend().Remove(context.TODO(), h); err != nil {
|
||||
return err
|
||||
}
|
||||
Verbosef("removed snapshot %v\n", sn.ID().Str())
|
||||
@@ -142,19 +144,19 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
if opts.GroupByTags {
|
||||
Printf("snapshots for host %v, tags [%v], paths: [%v]:\n\n", key.Hostname, strings.Join(key.Tags, ", "), strings.Join(key.Paths, ", "))
|
||||
Verbosef("snapshots for host %v, tags [%v], paths: [%v]:\n\n", key.Hostname, strings.Join(key.Tags, ", "), strings.Join(key.Paths, ", "))
|
||||
} else {
|
||||
Printf("snapshots for host %v, paths: [%v]:\n\n", key.Hostname, strings.Join(key.Paths, ", "))
|
||||
Verbosef("snapshots for host %v, paths: [%v]:\n\n", key.Hostname, strings.Join(key.Paths, ", "))
|
||||
}
|
||||
keep, remove := restic.ApplyPolicy(snapshotGroup, policy)
|
||||
|
||||
if len(keep) != 0 {
|
||||
if len(keep) != 0 && !gopts.Quiet {
|
||||
Printf("keep %d snapshots:\n", len(keep))
|
||||
PrintSnapshots(globalOptions.stdout, keep)
|
||||
Printf("\n")
|
||||
}
|
||||
|
||||
if len(remove) != 0 {
|
||||
if len(remove) != 0 && !gopts.Quiet {
|
||||
Printf("remove %d snapshots:\n", len(remove))
|
||||
PrintSnapshots(globalOptions.stdout, remove)
|
||||
Printf("\n")
|
||||
@@ -165,7 +167,7 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
if !opts.DryRun {
|
||||
for _, sn := range remove {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
err = repo.Backend().Remove(h)
|
||||
err = repo.Backend().Remove(context.TODO(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -174,7 +176,7 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
|
||||
if removeSnapshots > 0 && opts.Prune {
|
||||
Printf("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
if !opts.DryRun {
|
||||
return pruneRepository(gopts, repo)
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"restic/errors"
|
||||
"restic/repository"
|
||||
|
||||
@@ -27,7 +28,7 @@ func runInit(gopts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("Please specify repository location (-r)")
|
||||
}
|
||||
|
||||
be, err := create(gopts.Repo)
|
||||
be, err := create(gopts.Repo, gopts.extended)
|
||||
if err != nil {
|
||||
return errors.Fatalf("create backend at %s failed: %v\n", gopts.Repo, err)
|
||||
}
|
||||
@@ -43,7 +44,7 @@ func runInit(gopts GlobalOptions, args []string) error {
|
||||
|
||||
s := repository.New(be)
|
||||
|
||||
err = s.Init(gopts.password)
|
||||
err = s.Init(context.TODO(), gopts.password)
|
||||
if err != nil {
|
||||
return errors.Fatalf("create key in backend at %s failed: %v\n", gopts.Repo, err)
|
||||
}
|
||||
|
@@ -30,8 +30,8 @@ func listKeys(ctx context.Context, s *repository.Repository) error {
|
||||
tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
|
||||
tab.RowFormat = "%s%-10s %-10s %-10s %s"
|
||||
|
||||
for id := range s.List(restic.KeyFile, ctx.Done()) {
|
||||
k, err := repository.LoadKey(s, id.String())
|
||||
for id := range s.List(ctx, restic.KeyFile) {
|
||||
k, err := repository.LoadKey(ctx, s, id.String())
|
||||
if err != nil {
|
||||
Warnf("LoadKey() failed: %v\n", err)
|
||||
continue
|
||||
@@ -69,7 +69,7 @@ func addKey(gopts GlobalOptions, repo *repository.Repository) error {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := repository.AddKey(repo, pw, repo.Key())
|
||||
id, err := repository.AddKey(context.TODO(), repo, pw, repo.Key())
|
||||
if err != nil {
|
||||
return errors.Fatalf("creating new key failed: %v\n", err)
|
||||
}
|
||||
@@ -85,7 +85,7 @@ func deleteKey(repo *repository.Repository, name string) error {
|
||||
}
|
||||
|
||||
h := restic.Handle{Type: restic.KeyFile, Name: name}
|
||||
err := repo.Backend().Remove(h)
|
||||
err := repo.Backend().Remove(context.TODO(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -100,13 +100,13 @@ func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := repository.AddKey(repo, pw, repo.Key())
|
||||
id, err := repository.AddKey(context.TODO(), repo, pw, repo.Key())
|
||||
if err != nil {
|
||||
return errors.Fatalf("creating new key failed: %v\n", err)
|
||||
}
|
||||
|
||||
h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()}
|
||||
err = repo.Backend().Remove(h)
|
||||
err = repo.Backend().Remove(context.TODO(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"restic"
|
||||
"restic/errors"
|
||||
@@ -55,7 +56,7 @@ func runList(opts GlobalOptions, args []string) error {
|
||||
case "locks":
|
||||
t = restic.LockFile
|
||||
case "blobs":
|
||||
idx, err := index.Load(repo, nil)
|
||||
idx, err := index.Load(context.TODO(), repo, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -71,7 +72,7 @@ func runList(opts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("invalid type")
|
||||
}
|
||||
|
||||
for id := range repo.List(t, nil) {
|
||||
for id := range repo.List(context.TODO(), t) {
|
||||
Printf("%s\n", id)
|
||||
}
|
||||
|
||||
|
@@ -46,13 +46,13 @@ func init() {
|
||||
}
|
||||
|
||||
func printTree(repo *repository.Repository, id *restic.ID, prefix string) error {
|
||||
tree, err := repo.LoadTree(*id)
|
||||
tree, err := repo.LoadTree(context.TODO(), *id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range tree.Nodes {
|
||||
Printf(formatNode(prefix, entry, lsOptions.ListLong) + "\n")
|
||||
Printf("%s\n", formatNode(prefix, entry, lsOptions.ListLong))
|
||||
|
||||
if entry.Type == "dir" && entry.Subtree != nil {
|
||||
if err = printTree(repo, entry.Subtree, filepath.Join(prefix, entry.Name)); err != nil {
|
||||
@@ -74,7 +74,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = repo.LoadIndex(); err != nil {
|
||||
if err = repo.LoadIndex(context.TODO()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
100
src/cmds/restic/cmd_migrate.go
Normal file
@@ -0,0 +1,100 @@
package main

import (
    "restic"
    "restic/migrations"

    "github.com/spf13/cobra"
)

var cmdMigrate = &cobra.Command{
    Use:   "migrate [name]",
    Short: "apply migrations",
    Long: `
The "migrate" command applies migrations to a repository. When no migration
name is explicitely given, a list of migrations that can be applied is printed.
`,
    RunE: func(cmd *cobra.Command, args []string) error {
        return runMigrate(migrateOptions, globalOptions, args)
    },
}

// MigrateOptions bundles all options for the 'check' command.
type MigrateOptions struct {
}

var migrateOptions MigrateOptions

func init() {
    cmdRoot.AddCommand(cmdMigrate)
}

func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error {
    ctx := gopts.ctx
    Printf("available migrations:\n")
    for _, m := range migrations.All {
        ok, err := m.Check(ctx, repo)
        if err != nil {
            return err
        }

        if ok {
            Printf(" %v: %v\n", m.Name(), m.Desc())
        }
    }

    return nil
}

func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string) error {
    ctx := gopts.ctx

    var firsterr error
    for _, name := range args {
        for _, m := range migrations.All {
            if m.Name() == name {
                ok, err := m.Check(ctx, repo)
                if err != nil {
                    return err
                }

                if !ok {
                    Warnf("migration %v cannot be applied: check failed\n", m.Name())
                    continue
                }

                Printf("applying migration %v...\n", m.Name())
                if err = m.Apply(ctx, repo); err != nil {
                    Warnf("migration %v failed: %v\n", m.Name(), err)
                    if firsterr == nil {
                        firsterr = err
                    }
                    continue
                }

                Printf("migration %v: success\n", m.Name())
            }
        }
    }

    return firsterr
}

func runMigrate(opts MigrateOptions, gopts GlobalOptions, args []string) error {
    repo, err := OpenRepository(gopts)
    if err != nil {
        return err
    }

    lock, err := lockRepoExclusive(repo)
    defer unlockRepo(lock)
    if err != nil {
        return err
    }

    if len(args) == 0 {
        return checkMigrations(opts, gopts, repo)
    }

    return applyMigrations(opts, gopts, repo, args)
}
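As a usage sketch (not taken from the diff), calling the command without arguments
lists applicable migrations via checkMigrations, and naming a migration applies it
via applyMigrations; ``<name>`` and ``<description>`` are placeholders, and the
output lines mirror the Printf/Warnf strings in the code above:

.. code-block:: console

    $ ./restic migrate
    available migrations:
     <name>: <description>

    $ ./restic migrate <name>
    applying migration <name>...
    migration <name>: success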
@@ -4,6 +4,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -64,7 +65,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -95,14 +96,26 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
systemFuse.Debug = func(msg interface{}) {
|
||||
debug.Log("fuse: %v", msg)
|
||||
}
|
||||
|
||||
cfg := fuse.Config{
|
||||
OwnerIsRoot: opts.OwnerRoot,
|
||||
Host: opts.Host,
|
||||
Tags: opts.Tags,
|
||||
Paths: opts.Paths,
|
||||
}
|
||||
root, err := fuse.NewRoot(context.TODO(), repo, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Printf("Now serving the repository at %s\n", mountpoint)
|
||||
Printf("Don't forget to umount after quitting!\n")
|
||||
|
||||
root := fs.Tree{}
|
||||
root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot, opts.Paths, opts.Tags, opts.Host))
|
||||
|
||||
debug.Log("serving mount at %v", mountpoint)
|
||||
err = fs.Serve(c, &root)
|
||||
err = fs.Serve(c, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
27
src/cmds/restic/cmd_options.go
Normal file
@@ -0,0 +1,27 @@
package main

import (
    "fmt"
    "restic/options"

    "github.com/spf13/cobra"
)

var optionsCmd = &cobra.Command{
    Use:   "options",
    Short: "print list of extended options",
    Long: `
The "options" command prints a list of extended options.
`,
    Hidden: true,
    Run: func(cmd *cobra.Command, args []string) {
        fmt.Printf("All Extended Options:\n")
        for _, opt := range options.List() {
            fmt.Printf(" %-15s %s\n", opt.Namespace+"."+opt.Name, opt.Text)
        }
    },
}

func init() {
    cmdRoot.AddCommand(optionsCmd)
}
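For context (an illustration only), the hidden command prints every registered
extended option as ``namespace.name`` followed by its help text, in the format of
the Printf call above; the placeholders below are not real option names:

.. code-block:: console

    $ ./restic options
    All Extended Options:
     <namespace>.<name> <help text for the option>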
@@ -1,7 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"restic"
|
||||
"restic/debug"
|
||||
@@ -29,6 +28,18 @@ func init() {
|
||||
cmdRoot.AddCommand(cmdPrune)
|
||||
}
|
||||
|
||||
func shortenStatus(maxLength int, s string) string {
|
||||
if len(s) <= maxLength {
|
||||
return s
|
||||
}
|
||||
|
||||
if maxLength < 3 {
|
||||
return s[:maxLength]
|
||||
}
|
||||
|
||||
return s[:maxLength-3] + "..."
|
||||
}
|
||||
|
||||
// newProgressMax returns a progress that counts blobs.
|
||||
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
|
||||
if !show {
|
||||
@@ -44,10 +55,7 @@ func newProgressMax(show bool, max uint64, description string) *restic.Progress
|
||||
s.Blobs, max, description)
|
||||
|
||||
if w := stdoutTerminalWidth(); w > 0 {
|
||||
if len(status) > w {
|
||||
max := w - len(status) - 4
|
||||
status = status[:max] + "... "
|
||||
}
|
||||
status = shortenStatus(w, status)
|
||||
}
|
||||
|
||||
PrintProgress("%s", status)
|
||||
@@ -76,14 +84,13 @@ func runPrune(gopts GlobalOptions) error {
|
||||
}
|
||||
|
||||
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
err := repo.LoadIndex()
|
||||
ctx := gopts.ctx
|
||||
|
||||
err := repo.LoadIndex(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
|
||||
var stats struct {
|
||||
blobs int
|
||||
packs int
|
||||
@@ -92,18 +99,22 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
}
|
||||
|
||||
Verbosef("counting files in repo\n")
|
||||
for _ = range repo.List(restic.DataFile, ctx.Done()) {
|
||||
for range repo.List(ctx, restic.DataFile) {
|
||||
stats.packs++
|
||||
}
|
||||
|
||||
Verbosef("building new index for repo\n")
|
||||
|
||||
bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
|
||||
idx, err := index.New(repo, bar)
|
||||
idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, id := range invalidFiles {
|
||||
Warnf("incomplete pack file (will be removed): %v\n", id)
|
||||
}
|
||||
|
||||
blobs := 0
|
||||
for _, pack := range idx.Packs {
|
||||
stats.bytes += pack.Size
|
||||
@@ -135,7 +146,7 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
Verbosef("load all snapshots\n")
|
||||
|
||||
// find referenced blobs
|
||||
snapshots, err := restic.LoadAllSnapshots(repo)
|
||||
snapshots, err := restic.LoadAllSnapshots(ctx, repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -152,12 +163,16 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
for _, sn := range snapshots {
|
||||
debug.Log("process snapshot %v", sn.ID().Str())
|
||||
|
||||
err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
|
||||
err = restic.FindUsedBlobs(ctx, repo, *sn.Tree, usedBlobs, seenBlobs)
|
||||
if err != nil {
|
||||
if repo.Backend().IsNotExist(err) {
|
||||
return errors.Fatal("unable to load a tree from the repo: " + err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
debug.Log("found %v blobs for snapshot %v", sn.ID().Str())
|
||||
debug.Log("processed snapshot %v", sn.ID().Str())
|
||||
bar.Report(restic.Stat{Blobs: 1})
|
||||
}
|
||||
bar.Done()
|
||||
@@ -185,6 +200,12 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
|
||||
// find packs that are unneeded
|
||||
removePacks := restic.NewIDSet()
|
||||
|
||||
Verbosef("will remove %d invalid files\n", len(invalidFiles))
|
||||
for _, id := range invalidFiles {
|
||||
removePacks.Insert(id)
|
||||
}
|
||||
|
||||
for packID, p := range idx.Packs {
|
||||
|
||||
hasActiveBlob := false
|
||||
@@ -214,22 +235,28 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n",
|
||||
len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes)))
|
||||
|
||||
var repackedBlobs restic.IDSet
|
||||
if len(rewritePacks) != 0 {
|
||||
bar = newProgressMax(!gopts.Quiet, uint64(len(rewritePacks)), "packs rewritten")
|
||||
bar.Start()
|
||||
err = repository.Repack(repo, rewritePacks, usedBlobs, bar)
|
||||
repackedBlobs, err = repository.Repack(ctx, repo, rewritePacks, usedBlobs, bar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bar.Done()
|
||||
}
|
||||
|
||||
if err = rebuildIndex(ctx, repo, removePacks); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
removePacks.Merge(repackedBlobs)
|
||||
if len(removePacks) != 0 {
|
||||
bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted")
|
||||
bar.Start()
|
||||
for packID := range removePacks {
|
||||
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
|
||||
err = repo.Backend().Remove(h)
|
||||
err = repo.Backend().Remove(ctx, h)
|
||||
if err != nil {
|
||||
Warnf("unable to remove file %v from the repository\n", packID.Str())
|
||||
}
|
||||
@@ -238,10 +265,6 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
|
||||
bar.Done()
|
||||
}
|
||||
|
||||
if err = rebuildIndex(ctx, repo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Verbosef("done\n")
|
||||
return nil
|
||||
}
|
||||
|
@@ -38,19 +38,19 @@ func runRebuildIndex(gopts GlobalOptions) error {
|
||||
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
return rebuildIndex(ctx, repo)
|
||||
return rebuildIndex(ctx, repo, restic.NewIDSet())
|
||||
}
|
||||
|
||||
func rebuildIndex(ctx context.Context, repo restic.Repository) error {
|
||||
func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet) error {
|
||||
Verbosef("counting files in repo\n")
|
||||
|
||||
var packs uint64
|
||||
for _ = range repo.List(restic.DataFile, ctx.Done()) {
|
||||
for range repo.List(ctx, restic.DataFile) {
|
||||
packs++
|
||||
}
|
||||
|
||||
bar := newProgressMax(!globalOptions.Quiet, packs, "packs")
|
||||
idx, err := index.New(repo, bar)
|
||||
bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
|
||||
idx, _, err := index.New(ctx, repo, ignorePacks, bar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -58,11 +58,11 @@ func rebuildIndex(ctx context.Context, repo restic.Repository) error {
|
||||
Verbosef("finding old index files\n")
|
||||
|
||||
var supersedes restic.IDs
|
||||
for id := range repo.List(restic.IndexFile, ctx.Done()) {
|
||||
for id := range repo.List(ctx, restic.IndexFile) {
|
||||
supersedes = append(supersedes, id)
|
||||
}
|
||||
|
||||
id, err := idx.Save(repo, supersedes)
|
||||
id, err := idx.Save(ctx, repo, supersedes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -72,7 +72,7 @@ func rebuildIndex(ctx context.Context, repo restic.Repository) error {
|
||||
Verbosef("remove %d old index files\n", len(supersedes))
|
||||
|
||||
for _, id := range supersedes {
|
||||
if err := repo.Backend().Remove(restic.Handle{
|
||||
if err := repo.Backend().Remove(ctx, restic.Handle{
|
||||
Type: restic.IndexFile,
|
||||
Name: id.String(),
|
||||
}); err != nil {
|
||||
|
@@ -50,6 +50,8 @@ func init() {
|
||||
}
|
||||
|
||||
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
ctx := gopts.ctx
|
||||
|
||||
if len(args) != 1 {
|
||||
return errors.Fatal("no snapshot ID specified")
|
||||
}
|
||||
@@ -79,7 +81,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
err = repo.LoadIndex()
|
||||
err = repo.LoadIndex(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -87,7 +89,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
var id restic.ID
|
||||
|
||||
if snapshotIDString == "latest" {
|
||||
id, err = restic.FindLatestSnapshot(repo, opts.Paths, opts.Tags, opts.Host)
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
|
||||
if err != nil {
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
|
||||
}
|
||||
@@ -103,8 +105,10 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
Exitf(2, "creating restorer failed: %v\n", err)
|
||||
}
|
||||
|
||||
totalErrors := 0
|
||||
res.Error = func(dir string, node *restic.Node, err error) error {
|
||||
Warnf("error for %s: %+v\n", dir, err)
|
||||
Warnf("ignoring error for %s: %s\n", dir, err)
|
||||
totalErrors++
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -134,5 +138,9 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
|
||||
|
||||
return res.RestoreTo(opts.Target)
|
||||
err = res.RestoreTo(ctx, opts.Target)
|
||||
if totalErrors > 0 {
|
||||
Printf("There were %d errors\n", totalErrors)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@@ -76,7 +76,7 @@ func changeTags(repo *repository.Repository, sn *restic.Snapshot, setTags, addTa
|
||||
}
|
||||
|
||||
// Save the new snapshot.
|
||||
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
|
||||
id, err := repo.SaveJSONUnpacked(context.TODO(), restic.SnapshotFile, sn)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -89,7 +89,7 @@ func changeTags(repo *repository.Repository, sn *restic.Snapshot, setTags, addTa
|
||||
|
||||
// Remove the old snapshot.
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
if err = repo.Backend().Remove(h); err != nil {
|
||||
if err = repo.Backend().Remove(context.TODO(), h); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"restic"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -41,7 +42,7 @@ func runUnlock(opts UnlockOptions, gopts GlobalOptions) error {
|
||||
fn = restic.RemoveAllLocks
|
||||
}
|
||||
|
||||
err = fn(repo)
|
||||
err = fn(context.TODO(), repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -22,7 +22,7 @@ func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, hos
|
||||
// Process all snapshot IDs given as arguments.
|
||||
for _, s := range snapshotIDs {
|
||||
if s == "latest" {
|
||||
id, err = restic.FindLatestSnapshot(repo, paths, tags, host)
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, paths, tags, host)
|
||||
if err != nil {
|
||||
Warnf("Ignoring %q, no snapshot matched given filter (Paths:%v Tags:%v Host:%v)\n", s, paths, tags, host)
|
||||
usedFilter = true
|
||||
@@ -44,7 +44,7 @@ func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, hos
|
||||
}
|
||||
|
||||
for _, id := range ids.Uniq() {
|
||||
sn, err := restic.LoadSnapshot(repo, id)
|
||||
sn, err := restic.LoadSnapshot(ctx, repo, id)
|
||||
if err != nil {
|
||||
Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
|
||||
continue
|
||||
@@ -58,15 +58,7 @@ func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, hos
|
||||
return
|
||||
}
|
||||
|
||||
for id := range repo.List(restic.SnapshotFile, ctx.Done()) {
|
||||
sn, err := restic.LoadSnapshot(repo, id)
|
||||
if err != nil {
|
||||
Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
|
||||
continue
|
||||
}
|
||||
for _, sn := range restic.FindFilteredSnapshots(ctx, repo, host, tags, paths) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
24
src/cmds/restic/flags_test.go
Normal file
@@ -0,0 +1,24 @@
package main

import (
    "io/ioutil"
    "testing"
)

// TestFlags checks for double defined flags, the commands will panic on
// ParseFlags() when a shorthand flag is defined twice.
func TestFlags(t *testing.T) {
    for _, cmd := range cmdRoot.Commands() {
        t.Run(cmd.Name(), func(t *testing.T) {
            cmd.Flags().SetOutput(ioutil.Discard)
            err := cmd.ParseFlags([]string{"--help"})
            if err.Error() == "pflag: help requested" {
                err = nil
            }

            if err != nil {
                t.Fatal(err)
            }
        })
    }
}
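As a usage note (not part of the diff), the new test would be run with the standard
Go tooling; the exact package path depends on the GOPATH layout the project used at
the time, so the invocation below is only a sketch:

.. code-block:: console

    $ go test -run TestFlags ./src/cmds/restic/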
@@ -11,12 +11,15 @@ import (
"strings"
"syscall"

"restic/backend/b2"
"restic/backend/local"
"restic/backend/location"
"restic/backend/rest"
"restic/backend/s3"
"restic/backend/sftp"
"restic/backend/swift"
"restic/debug"
"restic/location"
"restic/options"
"restic/repository"

"restic/errors"
@@ -38,6 +41,10 @@ type GlobalOptions struct {
password string
stdout io.Writer
stderr io.Writer

Options []string

extended options.Options
}

var globalOptions = GlobalOptions{
@@ -65,6 +72,8 @@ func init() {
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")

f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")

restoreTerminal()
}

@@ -287,7 +296,7 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, errors.Fatal("Please specify repository location (-r)")
}

be, err := open(opts.Repo)
be, err := open(opts.Repo, opts.extended)
if err != nil {
return nil, err
}
@@ -301,7 +310,7 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
}
}

err = s.SearchKey(opts.password, maxKeys)
err = s.SearchKey(context.TODO(), opts.password, maxKeys)
if err != nil {
return nil, errors.Fatalf("unable to open repo: %v", err)
}
@@ -309,8 +318,92 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return s, nil
}

func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
// only apply options for a particular backend here
opts = opts.Extract(loc.Scheme)

switch loc.Scheme {
case "local":
cfg := loc.Config.(local.Config)
if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening local repository at %#v", cfg)
return cfg, nil

case "sftp":
cfg := loc.Config.(sftp.Config)
if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening sftp repository at %#v", cfg)
return cfg, nil

case "s3":
cfg := loc.Config.(s3.Config)
if cfg.KeyID == "" {
cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")
}

if cfg.Secret == "" {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}

if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening s3 repository at %#v", cfg)
return cfg, nil

case "swift":
cfg := loc.Config.(swift.Config)

if err := swift.ApplyEnvironment("", &cfg); err != nil {
return nil, err
}

if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening swift repository at %#v", cfg)
return cfg, nil

case "b2":
cfg := loc.Config.(b2.Config)

if cfg.AccountID == "" {
cfg.AccountID = os.Getenv("B2_ACCOUNT_ID")
}

if cfg.Key == "" {
cfg.Key = os.Getenv("B2_ACCOUNT_KEY")
}

if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening b2 repository at %#v", cfg)
return cfg, nil
case "rest":
cfg := loc.Config.(rest.Config)
if err := opts.Apply(loc.Scheme, &cfg); err != nil {
return nil, err
}

debug.Log("opening rest repository at %#v", cfg)
return cfg, nil
}

return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}

// Open the backend specified by a location config.
func open(s string) (restic.Backend, error) {
func open(s string, opts options.Options) (restic.Backend, error) {
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
@@ -319,27 +412,25 @@ func open(s string) (restic.Backend, error) {

var be restic.Backend

cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err
}

switch loc.Scheme {
case "local":
debug.Log("opening local repository at %#v", loc.Config)
be, err = local.Open(loc.Config.(string))
be, err = local.Open(cfg.(local.Config))
case "sftp":
debug.Log("opening sftp repository at %#v", loc.Config)
be, err = sftp.OpenWithConfig(loc.Config.(sftp.Config))
be, err = sftp.Open(cfg.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
if cfg.KeyID == "" {
cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")

}
if cfg.Secret == "" {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}

debug.Log("opening s3 repository at %#v", cfg)
be, err = s3.Open(cfg)
be, err = s3.Open(cfg.(s3.Config))
case "swift":
be, err = swift.Open(cfg.(swift.Config))
case "b2":
be, err = b2.Open(cfg.(b2.Config))
case "rest":
be, err = rest.Open(loc.Config.(rest.Config))
be, err = rest.Open(cfg.(rest.Config))

default:
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
@@ -348,38 +439,45 @@ func open(s string) (restic.Backend, error) {
return nil, errors.Fatalf("unable to open repo at %v: %v", s, err)
}

// check if config is there
fi, err := be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, s)
}

if fi.Size == 0 {
return nil, errors.New("config file has zero size, invalid repository?")
}

return be, nil
}

// Create the backend specified by URI.
func create(s string) (restic.Backend, error) {
func create(s string, opts options.Options) (restic.Backend, error) {
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
return nil, err
}

cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err
}

switch loc.Scheme {
case "local":
debug.Log("create local repository at %#v", loc.Config)
return local.Create(loc.Config.(string))
return local.Create(cfg.(local.Config))
case "sftp":
debug.Log("create sftp repository at %#v", loc.Config)
return sftp.CreateWithConfig(loc.Config.(sftp.Config))
return sftp.Create(cfg.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
if cfg.KeyID == "" {
cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")

}
if cfg.Secret == "" {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}

debug.Log("create s3 repository at %#v", loc.Config)
return s3.Open(cfg)
return s3.Create(cfg.(s3.Config))
case "swift":
return swift.Open(cfg.(swift.Config))
case "b2":
return b2.Create(cfg.(b2.Config))
case "rest":
return rest.Open(loc.Config.(rest.Config))
return rest.Create(cfg.(rest.Config))
}

debug.Log("invalid repository scheme: %v", s)

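The hunks above add the repeatable "-o key=value" flag and thread the resulting options.Options value through open() and create() via parseConfig(). The sketch below is not part of the diff; it only illustrates, under stated assumptions, how the Parse, Extract and Apply calls shown above appear to fit together for the "local" backend. The "local.layout=s3legacy" key is borrowed from the local layout test further down and is used here purely as an example value.

package main

import (
	"restic/backend/local"
	"restic/options"
)

// exampleExtendedOptions is a hypothetical helper mirroring parseConfig:
// parse the raw "-o key=value" strings, keep only the options for one
// backend scheme, and apply them to that backend's config struct.
func exampleExtendedOptions() (local.Config, error) {
	// e.g. collected from: restic -o local.layout=s3legacy ... (example key)
	raw := []string{"local.layout=s3legacy"}

	opts, err := options.Parse(raw)
	if err != nil {
		return local.Config{}, err
	}

	// keep only the options belonging to the "local" scheme
	opts = opts.Extract("local")

	var cfg local.Config
	if err := opts.Apply("local", &cfg); err != nil {
		return local.Config{}, err
	}

	return cfg, nil
}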
@@ -8,6 +8,7 @@ import (
_ "net/http/pprof"
"os"
"restic/errors"
"restic/repository"

"github.com/pkg/profile"
)
@@ -16,6 +17,7 @@ var (
listenMemoryProfile string
memProfilePath string
cpuProfilePath string
insecure bool

prof interface {
Stop()
@@ -27,6 +29,13 @@ func init() {
f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
}

type fakeTestingTB struct{}

func (fakeTestingTB) Logf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg, args...)
}

func runDebug() error {
@@ -46,8 +55,12 @@ func runDebug() error {

if memProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.MemProfile, profile.ProfilePath(memProfilePath))
} else if memProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.CPUProfile, profile.ProfilePath(memProfilePath))
} else if cpuProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
}

if insecure {
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
}

return nil

@@ -1,10 +1,10 @@
|
||||
// +build ignore
|
||||
// +build !openbsd
|
||||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -55,17 +55,15 @@ func waitForMount(t testing.TB, dir string) {
|
||||
t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
|
||||
}
|
||||
|
||||
func mount(t testing.TB, global GlobalOptions, dir string) {
|
||||
cmd := &CmdMount{global: &global}
|
||||
OK(t, cmd.Mount(dir))
|
||||
func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
|
||||
opts := MountOptions{}
|
||||
OK(t, runMount(opts, gopts, []string{dir}))
|
||||
}
|
||||
|
||||
func umount(t testing.TB, global GlobalOptions, dir string) {
|
||||
cmd := &CmdMount{global: &global}
|
||||
|
||||
func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
|
||||
var err error
|
||||
for i := 0; i < mountWait; i++ {
|
||||
if err = cmd.Umount(dir); err == nil {
|
||||
if err = umount(dir); err == nil {
|
||||
t.Logf("directory %v umounted", dir)
|
||||
return
|
||||
}
|
||||
@@ -87,9 +85,10 @@ func listSnapshots(t testing.TB, dir string) []string {
|
||||
|
||||
func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
|
||||
t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
|
||||
go mount(t, global, mountpoint)
|
||||
|
||||
go testRunMount(t, global, mountpoint)
|
||||
waitForMount(t, mountpoint)
|
||||
defer umount(t, global, mountpoint)
|
||||
defer testRunUmount(t, global, mountpoint)
|
||||
|
||||
if !snapshotsDirExists(t, mountpoint) {
|
||||
t.Fatal(`virtual directory "snapshots" doesn't exist`)
|
||||
@@ -110,7 +109,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
|
||||
}
|
||||
|
||||
for _, id := range snapshotIDs {
|
||||
snapshot, err := restic.LoadSnapshot(repo, id)
|
||||
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
|
||||
OK(t, err)
|
||||
|
||||
ts := snapshot.Time.Format(time.RFC3339)
|
||||
@@ -144,45 +143,46 @@ func TestMount(t *testing.T) {
|
||||
t.Skip("Skipping fuse tests")
|
||||
}
|
||||
|
||||
withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
|
||||
|
||||
cmdInit(t, global)
|
||||
repo, err := global.OpenRepository()
|
||||
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
|
||||
mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
|
||||
OK(t, err)
|
||||
|
||||
mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
|
||||
testRunInit(t, gopts)
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
OK(t, err)
|
||||
|
||||
// We remove the mountpoint now to check that cmdMount creates it
|
||||
RemoveAll(t, mountpoint)
|
||||
|
||||
checkSnapshots(t, global, repo, mountpoint, env.repo, []restic.ID{})
|
||||
checkSnapshots(t, gopts, repo, mountpoint, env.repo, []restic.ID{})
|
||||
|
||||
SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
|
||||
|
||||
// first backup
|
||||
cmdBackup(t, global, []string{env.testdata}, nil)
|
||||
snapshotIDs := cmdList(t, global, "snapshots")
|
||||
testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
|
||||
snapshotIDs := testRunList(t, "snapshots", gopts)
|
||||
Assert(t, len(snapshotIDs) == 1,
|
||||
"expected one snapshot, got %v", snapshotIDs)
|
||||
|
||||
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
|
||||
checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
|
||||
|
||||
// second backup, implicit incremental
|
||||
cmdBackup(t, global, []string{env.testdata}, nil)
|
||||
snapshotIDs = cmdList(t, global, "snapshots")
|
||||
testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
|
||||
snapshotIDs = testRunList(t, "snapshots", gopts)
|
||||
Assert(t, len(snapshotIDs) == 2,
|
||||
"expected two snapshots, got %v", snapshotIDs)
|
||||
|
||||
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
|
||||
checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
|
||||
|
||||
// third backup, explicit incremental
|
||||
cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
|
||||
snapshotIDs = cmdList(t, global, "snapshots")
|
||||
bopts := BackupOptions{Parent: snapshotIDs[0].String()}
|
||||
testRunBackup(t, []string{env.testdata}, bopts, gopts)
|
||||
snapshotIDs = testRunList(t, "snapshots", gopts)
|
||||
Assert(t, len(snapshotIDs) == 3,
|
||||
"expected three snapshots, got %v", snapshotIDs)
|
||||
|
||||
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
|
||||
checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -191,10 +191,10 @@ func TestMountSameTimestamps(t *testing.T) {
|
||||
t.Skip("Skipping fuse tests")
|
||||
}
|
||||
|
||||
withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
|
||||
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
|
||||
SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
|
||||
|
||||
repo, err := global.OpenRepository()
|
||||
repo, err := OpenRepository(gopts)
|
||||
OK(t, err)
|
||||
|
||||
mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
|
||||
@@ -206,6 +206,6 @@ func TestMountSameTimestamps(t *testing.T) {
|
||||
restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
|
||||
}
|
||||
|
||||
checkSnapshots(t, global, repo, mountpoint, env.repo, ids)
|
||||
checkSnapshots(t, gopts, repo, mountpoint, env.repo, ids)
|
||||
})
|
||||
}
|
||||
|
@@ -9,6 +9,7 @@ import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"restic/options"
|
||||
"restic/repository"
|
||||
. "restic/test"
|
||||
)
|
||||
@@ -199,6 +200,7 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
|
||||
password: TestPassword,
|
||||
stdout: os.Stdout,
|
||||
stderr: os.Stderr,
|
||||
extended: make(options.Options),
|
||||
}
|
||||
|
||||
// always overwrite global options
|
||||
|
@@ -52,11 +52,6 @@ func nlink(info os.FileInfo) uint64 {
|
||||
return uint64(stat.Nlink)
|
||||
}
|
||||
|
||||
func inode(info os.FileInfo) uint64 {
|
||||
stat, _ := info.Sys().(*syscall.Stat_t)
|
||||
return uint64(stat.Ino)
|
||||
}
|
||||
|
||||
func createFileSetPerHardlink(dir string) map[uint64][]string {
|
||||
var stat syscall.Stat_t
|
||||
linkTests := make(map[uint64][]string)
|
||||
|
@@ -149,18 +149,20 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
|
||||
return strings.Split(string(buf.Bytes()), "\n")
|
||||
}
|
||||
|
||||
func testRunFind(t testing.TB, gopts GlobalOptions, pattern string) []string {
|
||||
func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
globalOptions.stdout = buf
|
||||
globalOptions.JSON = wantJSON
|
||||
defer func() {
|
||||
globalOptions.stdout = os.Stdout
|
||||
globalOptions.JSON = false
|
||||
}()
|
||||
|
||||
opts := FindOptions{}
|
||||
|
||||
OK(t, runFind(opts, gopts, []string{pattern}))
|
||||
|
||||
return strings.Split(string(buf.Bytes()), "\n")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
|
||||
@@ -557,7 +559,7 @@ func TestBackupExclude(t *testing.T) {
|
||||
|
||||
opts.Excludes = []string{"*.tar.gz", "private/secret"}
|
||||
testRunBackup(t, []string{datadir}, opts, gopts)
|
||||
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, gopts))
|
||||
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, gopts))
|
||||
files = testRunLs(t, gopts, snapshotID)
|
||||
Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
|
||||
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
|
||||
@@ -1037,14 +1039,61 @@ func TestFind(t *testing.T) {
|
||||
testRunBackup(t, []string{env.testdata}, opts, gopts)
|
||||
testRunCheck(t, gopts)
|
||||
|
||||
results := testRunFind(t, gopts, "unexistingfile")
|
||||
Assert(t, len(results) != 0, "unexisting file found in repo (%v)", datafile)
|
||||
results := testRunFind(t, false, gopts, "unexistingfile")
|
||||
Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
|
||||
|
||||
results = testRunFind(t, gopts, "testfile")
|
||||
Assert(t, len(results) != 1, "file not found in repo (%v)", datafile)
|
||||
results = testRunFind(t, false, gopts, "testfile")
|
||||
lines := strings.Split(string(results), "\n")
|
||||
Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
|
||||
|
||||
results = testRunFind(t, gopts, "test")
|
||||
Assert(t, len(results) < 2, "less than two file found in repo (%v)", datafile)
|
||||
results = testRunFind(t, false, gopts, "testfile*")
|
||||
lines = strings.Split(string(results), "\n")
|
||||
Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
|
||||
})
|
||||
}
|
||||
|
||||
type testMatch struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
Permissions string `json:"permissions,omitempty"`
|
||||
Size uint64 `json:"size,omitempty"`
|
||||
Date time.Time `json:"date,omitempty"`
|
||||
UID uint32 `json:"uid,omitempty"`
|
||||
GID uint32 `json:"gid,omitempty"`
|
||||
}
|
||||
|
||||
type testMatches struct {
|
||||
Hits int `json:"hits,omitempty"`
|
||||
SnapshotID string `json:"snapshot,omitempty"`
|
||||
Matches []testMatch `json:"matches,omitempty"`
|
||||
}
|
||||
|
||||
func TestFindJSON(t *testing.T) {
|
||||
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
|
||||
datafile := filepath.Join("testdata", "backup-data.tar.gz")
|
||||
testRunInit(t, gopts)
|
||||
SetupTarTestFixture(t, env.testdata, datafile)
|
||||
|
||||
opts := BackupOptions{}
|
||||
|
||||
testRunBackup(t, []string{env.testdata}, opts, gopts)
|
||||
testRunCheck(t, gopts)
|
||||
|
||||
results := testRunFind(t, true, gopts, "unexistingfile")
|
||||
matches := []testMatches{}
|
||||
OK(t, json.Unmarshal(results, &matches))
|
||||
Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
|
||||
|
||||
results = testRunFind(t, true, gopts, "testfile")
|
||||
OK(t, json.Unmarshal(results, &matches))
|
||||
Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
|
||||
Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
|
||||
Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
|
||||
|
||||
results = testRunFind(t, true, gopts, "testfile*")
|
||||
OK(t, json.Unmarshal(results, &matches))
|
||||
Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
|
||||
Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
|
||||
Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
|
||||
})
|
||||
}
|
||||
|
||||
|
src/cmds/restic/local_layout_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
. "restic/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRestoreLocalLayout(t *testing.T) {
|
||||
withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
|
||||
var tests = []struct {
|
||||
filename string
|
||||
layout string
|
||||
}{
|
||||
{"repo-layout-default.tar.gz", ""},
|
||||
{"repo-layout-s3legacy.tar.gz", ""},
|
||||
{"repo-layout-default.tar.gz", "default"},
|
||||
{"repo-layout-s3legacy.tar.gz", "s3legacy"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
datafile := filepath.Join("..", "..", "restic", "backend", "testdata", test.filename)
|
||||
|
||||
SetupTarTestFixture(t, env.base, datafile)
|
||||
|
||||
gopts.extended["local.layout"] = test.layout
|
||||
|
||||
// check the repo
|
||||
testRunCheck(t, gopts)
|
||||
|
||||
// restore latest snapshot
|
||||
target := filepath.Join(env.base, "restore")
|
||||
testRunRestoreLatest(t, gopts, target, nil, "")
|
||||
|
||||
RemoveAll(t, filepath.Join(env.base, "repo"))
|
||||
RemoveAll(t, target)
|
||||
}
|
||||
})
|
||||
}
|
@@ -1,6 +1,7 @@
package main

import (
"context"
"fmt"
"os"
"sync"
@@ -32,7 +33,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
lockFn = restic.NewExclusiveLock
}

lock, err := lockFn(repo)
lock, err := lockFn(context.TODO(), repo)
if err != nil {
return nil, err
}
@@ -75,7 +76,7 @@ func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("refreshing locks")
globalLocks.Lock()
for _, lock := range globalLocks.locks {
err := lock.Refresh()
err := lock.Refresh(context.TODO())
if err != nil {
fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err)
}

@@ -1,10 +1,15 @@
package main

import (
"bufio"
"bytes"
"fmt"
"log"
"os"
"restic"
"restic/debug"
"restic/options"
"runtime"

"github.com/spf13/cobra"

@@ -22,18 +27,39 @@ directories in an encrypted repository stored on different backends.
SilenceErrors: true,
SilenceUsage: true,

// run the debug functions for all subcommands (if build tag "debug" is
// enabled)
PersistentPreRunE: func(*cobra.Command, []string) error {
return runDebug()
// parse extended options
opts, err := options.Parse(globalOptions.Options)
if err != nil {
return err
}
globalOptions.extended = opts

// run the debug functions for all subcommands (if build tag "debug" is
// enabled)
if err := runDebug(); err != nil {
return err
}

return nil
},
PersistentPostRun: func(*cobra.Command, []string) {
shutdownDebug()
},
}

var logBuffer = bytes.NewBuffer(nil)

func init() {
// install custom global logger into a buffer, if an error occurs
// we can show the logs
log.SetOutput(logBuffer)
}

func main() {
debug.Log("main %#v", os.Args)
debug.Log("restic %s, compiled with %v on %v/%v",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
err := cmdRoot.Execute()

switch {
@@ -43,6 +69,14 @@ func main() {
fmt.Fprintf(os.Stderr, "%v\n", err)
case err != nil:
fmt.Fprintf(os.Stderr, "%+v\n", err)

if logBuffer.Len() > 0 {
fmt.Fprintf(os.Stderr, "also, the following messages were logged by a library:\n")
sc := bufio.NewScanner(logBuffer)
for sc.Scan() {
fmt.Fprintln(os.Stderr, sc.Text())
}
}
}

var exitCode int

@@ -1,6 +1,7 @@
package archiver

import (
"context"
"io"
"restic"
"restic/debug"
@@ -20,7 +21,7 @@ type Reader struct {
}

// Archive reads data from the reader and saves it to the repo.
func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
func (r *Reader) Archive(ctx context.Context, name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
if name == "" {
return nil, restic.ID{}, errors.New("no filename given")
}
@@ -53,7 +54,7 @@ func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic
id := restic.Hash(chunk.Data)

if !repo.Index().Has(id, restic.DataBlob) {
_, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id)
_, err := repo.SaveBlob(ctx, restic.DataBlob, chunk.Data, id)
if err != nil {
return nil, restic.ID{}, err
}
@@ -72,7 +73,7 @@ func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic

tree := &restic.Tree{
Nodes: []*restic.Node{
&restic.Node{
{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
@@ -87,14 +88,14 @@ func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic
},
}

treeID, err := repo.SaveTree(tree)
treeID, err := repo.SaveTree(ctx, tree)
if err != nil {
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("tree saved as %v", treeID.Str())

id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
@@ -106,7 +107,7 @@ func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic
return nil, restic.ID{}, err
}

err = repo.SaveIndex()
err = repo.SaveIndex(ctx)
if err != nil {
return nil, restic.ID{}, err
}

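With the new signature above, callers of Reader.Archive pass a context as the first argument (the tests below use context.TODO()). A minimal sketch, assuming a Reader that has already been configured with a repository, hostname and tags as in those tests; the helper name and the "stdin" file name are illustrative only.

package archiver

import (
	"context"
	"os"

	"restic"
)

// archiveStdin is a hypothetical helper: it stores everything read from
// standard input as a single file node named "stdin" and returns the
// resulting snapshot and its ID.
func archiveStdin(ctx context.Context, r *Reader) (*restic.Snapshot, restic.ID, error) {
	return r.Archive(ctx, "stdin", os.Stdin, nil) // nil: no progress reporting
}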
@@ -2,6 +2,7 @@ package archiver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"math/rand"
|
||||
@@ -12,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
|
||||
n, err := repo.LoadBlob(restic.DataBlob, id, buf)
|
||||
n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
|
||||
if err != nil {
|
||||
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
|
||||
}
|
||||
@@ -21,7 +22,7 @@ func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) in
|
||||
}
|
||||
|
||||
func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
|
||||
tree, err := repo.LoadTree(treeID)
|
||||
tree, err := repo.LoadTree(context.TODO(), treeID)
|
||||
if err != nil {
|
||||
t.Fatalf("LoadTree() returned error %v", err)
|
||||
}
|
||||
@@ -85,7 +86,7 @@ func TestArchiveReader(t *testing.T) {
|
||||
Tags: []string{"test"},
|
||||
}
|
||||
|
||||
sn, id, err := r.Archive("fakefile", f, nil)
|
||||
sn, id, err := r.Archive(context.TODO(), "fakefile", f, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("ArchiveReader() returned error %v", err)
|
||||
}
|
||||
@@ -111,7 +112,7 @@ func TestArchiveReaderNull(t *testing.T) {
|
||||
Tags: []string{"test"},
|
||||
}
|
||||
|
||||
sn, id, err := r.Archive("fakefile", bytes.NewReader(nil), nil)
|
||||
sn, id, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(nil), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("ArchiveReader() returned error %v", err)
|
||||
}
|
||||
@@ -132,11 +133,8 @@ func (e errReader) Read([]byte) (int, error) {
|
||||
}
|
||||
|
||||
func countSnapshots(t testing.TB, repo restic.Repository) int {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
snapshots := 0
|
||||
for range repo.List(restic.SnapshotFile, done) {
|
||||
for range repo.List(context.TODO(), restic.SnapshotFile) {
|
||||
snapshots++
|
||||
}
|
||||
return snapshots
|
||||
@@ -152,7 +150,7 @@ func TestArchiveReaderError(t *testing.T) {
|
||||
Tags: []string{"test"},
|
||||
}
|
||||
|
||||
sn, id, err := r.Archive("fakefile", errReader("error returned by reading stdin"), nil)
|
||||
sn, id, err := r.Archive(context.TODO(), "fakefile", errReader("error returned by reading stdin"), nil)
|
||||
if err == nil {
|
||||
t.Errorf("expected error not returned")
|
||||
}
|
||||
@@ -195,7 +193,7 @@ func BenchmarkArchiveReader(t *testing.B) {
|
||||
t.ResetTimer()
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
_, _, err := r.Archive("fakefile", bytes.NewReader(buf), nil)
|
||||
_, _, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(buf), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -92,7 +93,7 @@ func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
|
||||
}
|
||||
|
||||
// Save stores a blob read from rd in the repository.
|
||||
func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
|
||||
func (arch *Archiver) Save(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
|
||||
debug.Log("Save(%v, %v)\n", t, id.Str())
|
||||
|
||||
if arch.isKnownBlob(id, restic.DataBlob) {
|
||||
@@ -100,7 +101,7 @@ func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := arch.repo.SaveBlob(t, data, id)
|
||||
_, err := arch.repo.SaveBlob(ctx, t, data, id)
|
||||
if err != nil {
|
||||
debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
|
||||
return err
|
||||
@@ -111,7 +112,7 @@ func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
|
||||
}
|
||||
|
||||
// SaveTreeJSON stores a tree in the repository.
|
||||
func (arch *Archiver) SaveTreeJSON(tree *restic.Tree) (restic.ID, error) {
|
||||
func (arch *Archiver) SaveTreeJSON(ctx context.Context, tree *restic.Tree) (restic.ID, error) {
|
||||
data, err := json.Marshal(tree)
|
||||
if err != nil {
|
||||
return restic.ID{}, errors.Wrap(err, "Marshal")
|
||||
@@ -124,7 +125,7 @@ func (arch *Archiver) SaveTreeJSON(tree *restic.Tree) (restic.ID, error) {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
return arch.repo.SaveBlob(restic.TreeBlob, data, id)
|
||||
return arch.repo.SaveBlob(ctx, restic.TreeBlob, data, id)
|
||||
}
|
||||
|
||||
func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
|
||||
@@ -153,13 +154,14 @@ type saveResult struct {
|
||||
bytes uint64
|
||||
}
|
||||
|
||||
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
|
||||
func (arch *Archiver) saveChunk(ctx context.Context, chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
|
||||
defer freeBuf(chunk.Data)
|
||||
|
||||
id := restic.Hash(chunk.Data)
|
||||
err := arch.Save(restic.DataBlob, chunk.Data, id)
|
||||
err := arch.Save(ctx, restic.DataBlob, chunk.Data, id)
|
||||
// TODO handle error
|
||||
if err != nil {
|
||||
debug.Log("Save(%v) failed: %v", id.Str(), err)
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -206,7 +208,7 @@ func updateNodeContent(node *restic.Node, results []saveResult) error {
|
||||
|
||||
// SaveFile stores the content of the file on the backend as a Blob by calling
|
||||
// Save for each chunk.
|
||||
func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.Node, error) {
|
||||
func (arch *Archiver) SaveFile(ctx context.Context, p *restic.Progress, node *restic.Node) (*restic.Node, error) {
|
||||
file, err := fs.Open(node.Path)
|
||||
defer file.Close()
|
||||
if err != nil {
|
||||
@@ -234,7 +236,7 @@ func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.N
|
||||
}
|
||||
|
||||
resCh := make(chan saveResult, 1)
|
||||
go arch.saveChunk(chunk, p, <-arch.blobToken, file, resCh)
|
||||
go arch.saveChunk(ctx, chunk, p, <-arch.blobToken, file, resCh)
|
||||
resultChannels = append(resultChannels, resCh)
|
||||
}
|
||||
|
||||
@@ -247,7 +249,7 @@ func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.N
|
||||
return node, err
|
||||
}
|
||||
|
||||
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
|
||||
func (arch *Archiver) fileWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, entCh <-chan pipe.Entry) {
|
||||
defer func() {
|
||||
debug.Log("done")
|
||||
wg.Done()
|
||||
@@ -305,7 +307,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
|
||||
// otherwise read file normally
|
||||
if node.Type == "file" && len(node.Content) == 0 {
|
||||
debug.Log(" read and save %v", e.Path())
|
||||
node, err = arch.SaveFile(p, node)
|
||||
node, err = arch.SaveFile(ctx, p, node)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
|
||||
arch.Warn(e.Path(), nil, err)
|
||||
@@ -322,14 +324,14 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
|
||||
debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content))
|
||||
e.Result() <- node
|
||||
p.Report(restic.Stat{Files: 1})
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
// pipeline was cancelled
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
|
||||
func (arch *Archiver) dirWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, dirCh <-chan pipe.Dir) {
|
||||
debug.Log("start")
|
||||
defer func() {
|
||||
debug.Log("done")
|
||||
@@ -398,7 +400,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
|
||||
node.Error = err.Error()
|
||||
}
|
||||
|
||||
id, err := arch.SaveTreeJSON(tree)
|
||||
id, err := arch.SaveTreeJSON(ctx, tree)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -415,7 +417,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
|
||||
if dir.Path() != "" {
|
||||
p.Report(restic.Stat{Dirs: 1})
|
||||
}
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
// pipeline was cancelled
|
||||
return
|
||||
}
|
||||
@@ -427,7 +429,7 @@ type archivePipe struct {
|
||||
New <-chan pipe.Job
|
||||
}
|
||||
|
||||
func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
|
||||
func copyJobs(ctx context.Context, in <-chan pipe.Job, out chan<- pipe.Job) {
|
||||
var (
|
||||
// disable sending on the outCh until we received a job
|
||||
outCh chan<- pipe.Job
|
||||
@@ -439,7 +441,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case job, ok = <-inCh:
|
||||
if !ok {
|
||||
@@ -462,7 +464,7 @@ type archiveJob struct {
|
||||
new pipe.Job
|
||||
}
|
||||
|
||||
func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
func (a *archivePipe) compare(ctx context.Context, out chan<- pipe.Job) {
|
||||
defer func() {
|
||||
close(out)
|
||||
debug.Log("done")
|
||||
@@ -488,7 +490,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
out <- archiveJob{new: newJob}.Copy()
|
||||
}
|
||||
|
||||
copyJobs(done, a.New, out)
|
||||
copyJobs(ctx, a.New, out)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -585,7 +587,7 @@ func (j archiveJob) Copy() pipe.Job {
|
||||
const saveIndexTime = 30 * time.Second
|
||||
|
||||
// saveIndexes regularly queries the master index for full indexes and saves them.
|
||||
func (arch *Archiver) saveIndexes(wg *sync.WaitGroup, done <-chan struct{}) {
|
||||
func (arch *Archiver) saveIndexes(ctx context.Context, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
ticker := time.NewTicker(saveIndexTime)
|
||||
@@ -593,11 +595,11 @@ func (arch *Archiver) saveIndexes(wg *sync.WaitGroup, done <-chan struct{}) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
debug.Log("saving full indexes")
|
||||
err := arch.repo.SaveFullIndex()
|
||||
err := arch.repo.SaveFullIndex(ctx)
|
||||
if err != nil {
|
||||
debug.Log("save indexes returned an error: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err)
|
||||
@@ -634,7 +636,7 @@ func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is
|
||||
// used to compare the files to the ones archived at the time this snapshot was
|
||||
// taken.
|
||||
func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
|
||||
func (arch *Archiver) Snapshot(ctx context.Context, p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
|
||||
paths = unique(paths)
|
||||
sort.Sort(baseNameSlice(paths))
|
||||
|
||||
@@ -643,7 +645,6 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
debug.RunHook("Archiver.Snapshot", nil)
|
||||
|
||||
// signal the whole pipeline to stop
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
|
||||
p.Start()
|
||||
@@ -663,14 +664,14 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
sn.Parent = parentID
|
||||
|
||||
// load parent snapshot
|
||||
parent, err := restic.LoadSnapshot(arch.repo, *parentID)
|
||||
parent, err := restic.LoadSnapshot(ctx, arch.repo, *parentID)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
// start walker on old tree
|
||||
ch := make(chan walk.TreeJob)
|
||||
go walk.Tree(arch.repo, *parent.Tree, done, ch)
|
||||
go walk.Tree(ctx, arch.repo, *parent.Tree, ch)
|
||||
jobs.Old = ch
|
||||
} else {
|
||||
// use closed channel
|
||||
@@ -683,13 +684,13 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
pipeCh := make(chan pipe.Job)
|
||||
resCh := make(chan pipe.Result, 1)
|
||||
go func() {
|
||||
pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
|
||||
pipe.Walk(ctx, paths, arch.SelectFilter, pipeCh, resCh)
|
||||
debug.Log("pipe.Walk done")
|
||||
}()
|
||||
jobs.New = pipeCh
|
||||
|
||||
ch := make(chan pipe.Job)
|
||||
go jobs.compare(done, ch)
|
||||
go jobs.compare(ctx, ch)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
entCh := make(chan pipe.Entry)
|
||||
@@ -708,22 +709,22 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
// run workers
|
||||
for i := 0; i < maxConcurrency; i++ {
|
||||
wg.Add(2)
|
||||
go arch.fileWorker(&wg, p, done, entCh)
|
||||
go arch.dirWorker(&wg, p, done, dirCh)
|
||||
go arch.fileWorker(ctx, &wg, p, entCh)
|
||||
go arch.dirWorker(ctx, &wg, p, dirCh)
|
||||
}
|
||||
|
||||
// run index saver
|
||||
var wgIndexSaver sync.WaitGroup
|
||||
stopIndexSaver := make(chan struct{})
|
||||
indexCtx, indexCancel := context.WithCancel(ctx)
|
||||
wgIndexSaver.Add(1)
|
||||
go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)
|
||||
go arch.saveIndexes(indexCtx, &wgIndexSaver)
|
||||
|
||||
// wait for all workers to terminate
|
||||
debug.Log("wait for workers")
|
||||
wg.Wait()
|
||||
|
||||
// stop index saver
|
||||
close(stopIndexSaver)
|
||||
indexCancel()
|
||||
wgIndexSaver.Wait()
|
||||
|
||||
debug.Log("workers terminated")
|
||||
@@ -740,7 +741,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
sn.Tree = root.Subtree
|
||||
|
||||
// load top-level tree again to see if it is empty
|
||||
toptree, err := arch.repo.LoadTree(*root.Subtree)
|
||||
toptree, err := arch.repo.LoadTree(ctx, *root.Subtree)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
@@ -750,7 +751,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
}
|
||||
|
||||
// save index
|
||||
err = arch.repo.SaveIndex()
|
||||
err = arch.repo.SaveIndex(ctx)
|
||||
if err != nil {
|
||||
debug.Log("error saving index: %v", err)
|
||||
return nil, restic.ID{}, err
|
||||
@@ -759,7 +760,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostnam
|
||||
debug.Log("saved indexes")
|
||||
|
||||
// save snapshot
|
||||
id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
|
||||
id, err := arch.repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package archiver_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
mrand "math/rand"
|
||||
@@ -39,33 +40,33 @@ func randomID() restic.ID {
|
||||
func forgetfulBackend() restic.Backend {
|
||||
be := &mock.Backend{}
|
||||
|
||||
be.TestFn = func(h restic.Handle) (bool, error) {
|
||||
be.TestFn = func(ctx context.Context, h restic.Handle) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
be.LoadFn = func(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
|
||||
be.LoadFn = func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
|
||||
be.SaveFn = func(h restic.Handle, rd io.Reader) error {
|
||||
be.SaveFn = func(ctx context.Context, h restic.Handle, rd io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
be.StatFn = func(h restic.Handle) (restic.FileInfo, error) {
|
||||
be.StatFn = func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
|
||||
return restic.FileInfo{}, errors.New("not found")
|
||||
}
|
||||
|
||||
be.RemoveFn = func(h restic.Handle) error {
|
||||
be.RemoveFn = func(ctx context.Context, h restic.Handle) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string {
|
||||
be.ListFn = func(ctx context.Context, t restic.FileType) <-chan string {
|
||||
ch := make(chan string)
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
be.DeleteFn = func() error {
|
||||
be.DeleteFn = func(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -80,7 +81,7 @@ func testArchiverDuplication(t *testing.T) {
|
||||
|
||||
repo := repository.New(forgetfulBackend())
|
||||
|
||||
err = repo.Init("foo")
|
||||
err = repo.Init(context.TODO(), "foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -108,7 +109,7 @@ func testArchiverDuplication(t *testing.T) {
|
||||
|
||||
buf := make([]byte, 50)
|
||||
|
||||
err := arch.Save(restic.DataBlob, buf, id)
|
||||
err := arch.Save(context.TODO(), restic.DataBlob, buf, id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -127,7 +128,7 @@ func testArchiverDuplication(t *testing.T) {
|
||||
case <-done:
|
||||
return
|
||||
case <-ticker.C:
|
||||
err := repo.SaveFullIndex()
|
||||
err := repo.SaveFullIndex(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -83,10 +84,10 @@ func (j testPipeJob) Error() error { return j.err }
|
||||
func (j testPipeJob) Info() os.FileInfo { return j.fi }
|
||||
func (j testPipeJob) Result() chan<- pipe.Result { return j.res }
|
||||
|
||||
func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) {
|
||||
func testTreeWalker(ctx context.Context, out chan<- walk.TreeJob) {
|
||||
for _, e := range treeJobs {
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case out <- walk.TreeJob{Path: e}:
|
||||
}
|
||||
@@ -95,10 +96,10 @@ func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) {
|
||||
close(out)
|
||||
}
|
||||
|
||||
func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
func testPipeWalker(ctx context.Context, out chan<- pipe.Job) {
|
||||
for _, e := range pipeJobs {
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case out <- testPipeJob{path: e}:
|
||||
}
|
||||
@@ -108,19 +109,19 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
}
|
||||
|
||||
func TestArchivePipe(t *testing.T) {
|
||||
done := make(chan struct{})
|
||||
ctx := context.TODO()
|
||||
|
||||
treeCh := make(chan walk.TreeJob)
|
||||
pipeCh := make(chan pipe.Job)
|
||||
|
||||
go testTreeWalker(done, treeCh)
|
||||
go testPipeWalker(done, pipeCh)
|
||||
go testTreeWalker(ctx, treeCh)
|
||||
go testPipeWalker(ctx, pipeCh)
|
||||
|
||||
p := archivePipe{Old: treeCh, New: pipeCh}
|
||||
|
||||
ch := make(chan pipe.Job)
|
||||
|
||||
go p.compare(done, ch)
|
||||
go p.compare(ctx, ch)
|
||||
|
||||
i := 0
|
||||
for job := range ch {
|
||||
|
@@ -2,6 +2,7 @@ package archiver_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -42,7 +43,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
|
||||
Assert(b, uint(len(chunk.Data)) == chunk.Length,
|
||||
"invalid length: got %d, expected %d", len(chunk.Data), chunk.Length)
|
||||
|
||||
_, err = crypto.Encrypt(key, buf2, chunk.Data)
|
||||
_, err = key.Encrypt(buf2, chunk.Data)
|
||||
OK(b, err)
|
||||
}
|
||||
}
|
||||
@@ -75,7 +76,7 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
|
||||
}
|
||||
|
||||
// reduce length of chunkBuf
|
||||
crypto.Encrypt(key, chunk.Data, chunk.Data)
|
||||
key.Encrypt(chunk.Data, chunk.Data)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,7 +105,7 @@ func archiveDirectory(b testing.TB) {
|
||||
|
||||
arch := archiver.New(repo)
|
||||
|
||||
_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, "localhost", nil)
|
||||
_, id, err := arch.Snapshot(context.TODO(), nil, []string{BenchArchiveDirectory}, nil, "localhost", nil)
|
||||
OK(b, err)
|
||||
|
||||
b.Logf("snapshot archived as %v", id)
|
||||
@@ -129,7 +130,7 @@ func BenchmarkArchiveDirectory(b *testing.B) {
|
||||
}
|
||||
|
||||
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
|
||||
for _ = range repo.Backend().List(t, nil) {
|
||||
for range repo.Backend().List(context.TODO(), t) {
|
||||
n++
|
||||
}
|
||||
|
||||
@@ -234,7 +235,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
|
||||
|
||||
id := restic.Hash(c.Data)
|
||||
time.Sleep(time.Duration(id[0]))
|
||||
err := arch.Save(restic.DataBlob, c.Data, id)
|
||||
err := arch.Save(context.TODO(), restic.DataBlob, c.Data, id)
|
||||
<-barrier
|
||||
errChan <- err
|
||||
}(c, errChan)
|
||||
@@ -246,7 +247,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
|
||||
}
|
||||
|
||||
OK(t, repo.Flush())
|
||||
OK(t, repo.SaveIndex())
|
||||
OK(t, repo.SaveIndex(context.TODO()))
|
||||
|
||||
chkr := createAndInitChecker(t, repo)
|
||||
assertNoUnreferencedPacks(t, chkr)
|
||||
@@ -271,7 +272,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
|
||||
func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
|
||||
chkr := checker.New(repo)
|
||||
|
||||
hints, errs := chkr.LoadIndex()
|
||||
hints, errs := chkr.LoadIndex(context.TODO())
|
||||
if len(errs) > 0 {
|
||||
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
|
||||
}
|
||||
@@ -284,11 +285,8 @@ func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker
|
||||
}
|
||||
|
||||
func assertNoUnreferencedPacks(t *testing.T, chkr *checker.Checker) {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
errChan := make(chan error)
|
||||
go chkr.Packs(errChan, done)
|
||||
go chkr.Packs(context.TODO(), errChan)
|
||||
|
||||
for err := range errChan {
|
||||
OK(t, err)
|
||||
@@ -301,7 +299,7 @@ func TestArchiveEmptySnapshot(t *testing.T) {
|
||||
|
||||
arch := archiver.New(repo)
|
||||
|
||||
sn, id, err := arch.Snapshot(nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil)
|
||||
sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for empty snapshot, got nil")
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"restic"
|
||||
"testing"
|
||||
)
|
||||
@@ -8,7 +9,7 @@ import (
|
||||
// TestSnapshot creates a new snapshot of path.
|
||||
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
|
||||
arch := New(repo)
|
||||
sn, _, err := arch.Snapshot(nil, []string{path}, []string{"test"}, "localhost", parent)
|
||||
sn, _, err := arch.Snapshot(context.TODO(), nil, []string{path}, []string{"test"}, "localhost", parent)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@@ -1,6 +1,9 @@
package restic

import "io"
import (
"context"
"io"
)

// Backend is used to store and access data.
type Backend interface {
@@ -9,30 +12,34 @@ type Backend interface {
Location() string

// Test a boolean value whether a File with the name and type exists.
Test(h Handle) (bool, error)
Test(ctx context.Context, h Handle) (bool, error)

// Remove removes a File with type t and name.
Remove(h Handle) error
Remove(ctx context.Context, h Handle) error

// Close the backend
Close() error

// Save stores the data in the backend under the given handle.
Save(h Handle, rd io.Reader) error
Save(ctx context.Context, h Handle, rd io.Reader) error

// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is larger than zero, only a portion of the file
// is returned. rd must be closed after use. If an error is returned, the
// ReadCloser must be nil.
Load(h Handle, length int, offset int64) (io.ReadCloser, error)
Load(ctx context.Context, h Handle, length int, offset int64) (io.ReadCloser, error)

// Stat returns information about the File identified by h.
Stat(h Handle) (FileInfo, error)
Stat(ctx context.Context, h Handle) (FileInfo, error)

// List returns a channel that yields all names of files of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t FileType, done <-chan struct{}) <-chan string
// arbitrary order. A goroutine is started for this, which is stopped when
// ctx is cancelled.
List(ctx context.Context, t FileType) <-chan string

// IsNotExist returns true if the error was caused by a non-existing file
// in the backend.
IsNotExist(err error) bool
}

// FileInfo is returned by Stat() and contains information about a file in the
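In the interface above, a context.Context replaces the old done channel on every method. The short sketch below is not taken from the diff; it only shows, as an assumed usage pattern, how cancelling the context stops the listing goroutine started by List. The function and variable names are illustrative.

package main

import (
	"context"
	"fmt"
	"restic"
)

// listSnapshotFiles drains all snapshot file names from a backend; the
// deferred cancel stops the goroutine started by List once we return.
func listSnapshotFiles(be restic.Backend) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for name := range be.List(ctx, restic.SnapshotFile) {
		fmt.Println(name)
	}
}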
src/restic/backend/b2/b2.go (new file, 377 lines)
@@ -0,0 +1,377 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"path"
|
||||
"restic"
|
||||
"strings"
|
||||
|
||||
"restic/backend"
|
||||
"restic/debug"
|
||||
"restic/errors"
|
||||
|
||||
"github.com/kurin/blazer/b2"
|
||||
)
|
||||
|
||||
// b2Backend is a backend which stores its data on Backblaze B2.
|
||||
type b2Backend struct {
|
||||
client *b2.Client
|
||||
bucket *b2.Bucket
|
||||
cfg Config
|
||||
backend.Layout
|
||||
sem *backend.Semaphore
|
||||
}
|
||||
|
||||
// ensure statically that *b2Backend implements restic.Backend.
|
||||
var _ restic.Backend = &b2Backend{}
|
||||
|
||||
func newClient(ctx context.Context, cfg Config) (*b2.Client, error) {
|
||||
opts := []b2.ClientOption{b2.Transport(backend.Transport())}
|
||||
|
||||
c, err := b2.NewClient(ctx, cfg.AccountID, cfg.Key, opts...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "b2.NewClient")
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Open opens a connection to the B2 service.
|
||||
func Open(cfg Config) (restic.Backend, error) {
	debug.Log("cfg %#v", cfg)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	client, err := newClient(ctx, cfg)
	if err != nil {
		return nil, err
	}

	bucket, err := client.Bucket(ctx, cfg.Bucket)
	if err != nil {
		return nil, errors.Wrap(err, "Bucket")
	}

	sem, err := backend.NewSemaphore(cfg.Connections)
	if err != nil {
		return nil, err
	}

	be := &b2Backend{
		client: client,
		bucket: bucket,
		cfg:    cfg,
		Layout: &backend.DefaultLayout{
			Join: path.Join,
			Path: cfg.Prefix,
		},
		sem: sem,
	}

	return be, nil
}

// Create opens a connection to the B2 service. If the bucket does not exist yet,
// it is created.
func Create(cfg Config) (restic.Backend, error) {
	debug.Log("cfg %#v", cfg)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	client, err := newClient(ctx, cfg)
	if err != nil {
		return nil, err
	}

	attr := b2.BucketAttrs{
		Type: b2.Private,
	}
	bucket, err := client.NewBucket(ctx, cfg.Bucket, &attr)
	if err != nil {
		return nil, errors.Wrap(err, "NewBucket")
	}

	sem, err := backend.NewSemaphore(cfg.Connections)
	if err != nil {
		return nil, err
	}

	be := &b2Backend{
		client: client,
		bucket: bucket,
		cfg:    cfg,
		Layout: &backend.DefaultLayout{
			Join: path.Join,
			Path: cfg.Prefix,
		},
		sem: sem,
	}

	present, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
	if err != nil {
		return nil, err
	}

	if present {
		return nil, errors.New("config already exists")
	}

	return be, nil
}

// Location returns the location for the backend.
func (be *b2Backend) Location() string {
	return be.cfg.Bucket
}

// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
	io.ReadCloser
	eofSeen bool
	f       func()
}

func (wr *wrapReader) Read(p []byte) (int, error) {
	if wr.eofSeen {
		return 0, io.EOF
	}

	n, err := wr.ReadCloser.Read(p)
	if err == io.EOF {
		wr.eofSeen = true
	}
	return n, err
}

func (wr *wrapReader) Close() error {
	err := wr.ReadCloser.Close()
	wr.f()
	return err
}

// IsNotExist returns true if the error is caused by a non-existing file.
func (be *b2Backend) IsNotExist(err error) bool {
	return b2.IsNotExist(errors.Cause(err))
}

// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
	if err := h.Valid(); err != nil {
		return nil, err
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	ctx, cancel := context.WithCancel(ctx)

	be.sem.GetToken()

	name := be.Layout.Filename(h)
	obj := be.bucket.Object(name)

	if offset == 0 && length == 0 {
		rd := obj.NewReader(ctx)
		wrapper := &wrapReader{
			ReadCloser: rd,
			f: func() {
				cancel()
				be.sem.ReleaseToken()
			},
		}
		return wrapper, nil
	}

	// pass a negative length to NewRangeReader so that the remainder of the
	// file is read.
	if length == 0 {
		length = -1
	}

	rd := obj.NewRangeReader(ctx, offset, int64(length))
	wrapper := &wrapReader{
		ReadCloser: rd,
		f: func() {
			cancel()
			be.sem.ReleaseToken()
		},
	}
	return wrapper, nil
}

// Save stores data in the backend at the handle.
func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	if err := h.Valid(); err != nil {
		return err
	}

	be.sem.GetToken()
	defer be.sem.ReleaseToken()

	name := be.Filename(h)
	debug.Log("Save %v, name %v", h, name)
	obj := be.bucket.Object(name)

	_, err = obj.Attrs(ctx)
	if err == nil {
		debug.Log(" %v already exists", h)
		return errors.New("key already exists")
	}

	w := obj.NewWriter(ctx)
	n, err := io.Copy(w, rd)
	debug.Log(" saved %d bytes, err %v", n, err)

	if err != nil {
		_ = w.Close()
		return errors.Wrap(err, "Copy")
	}

	return errors.Wrap(w.Close(), "Close")
}

// Stat returns information about a blob.
func (be *b2Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("Stat %v", h)

	be.sem.GetToken()
	defer be.sem.ReleaseToken()

	name := be.Filename(h)
	obj := be.bucket.Object(name)
	info, err := obj.Attrs(ctx)
	if err != nil {
		debug.Log("Attrs() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}
	return restic.FileInfo{Size: info.Size}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *b2Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
	debug.Log("Test %v", h)

	be.sem.GetToken()
	defer be.sem.ReleaseToken()

	found := false
	name := be.Filename(h)
	obj := be.bucket.Object(name)
	info, err := obj.Attrs(ctx)
	if err == nil && info != nil && info.Status == b2.Uploaded {
		found = true
	}
	return found, nil
}

// Remove removes the blob with the given name and type.
func (be *b2Backend) Remove(ctx context.Context, h restic.Handle) error {
	debug.Log("Remove %v", h)

	be.sem.GetToken()
	defer be.sem.ReleaseToken()

	obj := be.bucket.Object(be.Filename(h))
	return errors.Wrap(obj.Delete(ctx), "Delete")
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the context is cancelled, sending
// stops.
func (be *b2Backend) List(ctx context.Context, t restic.FileType) <-chan string {
	debug.Log("List %v", t)
	ch := make(chan string)

	ctx, cancel := context.WithCancel(ctx)

	be.sem.GetToken()

	go func() {
		defer close(ch)
		defer cancel()
		defer be.sem.ReleaseToken()

		prefix := be.Dirname(restic.Handle{Type: t})
		cur := &b2.Cursor{Prefix: prefix}

		for {
			objs, c, err := be.bucket.ListCurrentObjects(ctx, 1000, cur)
			if err != nil && err != io.EOF {
				return
			}
			for _, obj := range objs {
				// Skip objects returned that do not have the specified prefix.
				if !strings.HasPrefix(obj.Name(), prefix) {
					continue
				}

				m := path.Base(obj.Name())
				if m == "" {
					continue
				}

				select {
				case ch <- m:
				case <-ctx.Done():
					return
				}
			}
			if err == io.EOF {
				return
			}
			cur = c
		}
	}()

	return ch
}

// Remove keys for a specified backend type.
func (be *b2Backend) removeKeys(ctx context.Context, t restic.FileType) error {
	debug.Log("removeKeys %v", t)
	for key := range be.List(ctx, t) {
		err := be.Remove(ctx, restic.Handle{Type: t, Name: key})
		if err != nil {
			return err
		}
	}
	return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *b2Backend) Delete(ctx context.Context) error {
	alltypes := []restic.FileType{
		restic.DataFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := be.removeKeys(ctx, t)
		if err != nil {
			return err
		}
	}
	err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil && b2.IsNotExist(errors.Cause(err)) {
		err = nil
	}

	return err
}

// Close does nothing
func (be *b2Backend) Close() error { return nil }
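The wrapReader type above ties cleanup to the moment the caller closes the reader returned by Load: the callback cancels the per-request context and returns the semaphore token, so an abandoned download cannot pin a connection slot. A minimal, self-contained sketch of the same pattern using only the standard library (closeHookReader and its callback are illustrative names, not part of restic):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// closeHookReader runs a callback when the reader is closed. It mirrors the
// wrapReader pattern of the b2 backend above, where the callback cancels the
// request context and releases the semaphore token.
type closeHookReader struct {
	io.ReadCloser
	onClose func()
}

func (r *closeHookReader) Close() error {
	err := r.ReadCloser.Close()
	r.onClose()
	return err
}

func main() {
	rd := &closeHookReader{
		ReadCloser: ioutil.NopCloser(strings.NewReader("hello")),
		onClose:    func() { fmt.Println("resources released") },
	}

	data, _ := ioutil.ReadAll(rd)
	fmt.Println(string(data))
	_ = rd.Close() // triggers the callback
}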
src/restic/backend/b2/b2_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package b2_test

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"restic"
	"restic/backend/b2"
	"restic/backend/test"

	. "restic/test"
)

func newB2TestSuite(t testing.TB) *test.Suite {
	return &test.Suite{
		// do not use excessive data
		MinimalData: true,

		// wait for at most 10 seconds for removed files to disappear
		WaitForDelayedRemoval: 10 * time.Second,

		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (interface{}, error) {
			b2cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY"))
			if err != nil {
				return nil, err
			}

			cfg := b2cfg.(b2.Config)
			cfg.AccountID = os.Getenv("RESTIC_TEST_B2_ACCOUNT_ID")
			cfg.Key = os.Getenv("RESTIC_TEST_B2_ACCOUNT_KEY")
			cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
			return cfg, nil
		},

		// CreateFn is a function that creates a temporary repository for the tests.
		Create: func(config interface{}) (restic.Backend, error) {
			cfg := config.(b2.Config)
			return b2.Create(cfg)
		},

		// OpenFn is a function that opens a previously created temporary repository.
		Open: func(config interface{}) (restic.Backend, error) {
			cfg := config.(b2.Config)
			return b2.Open(cfg)
		},

		// CleanupFn removes data created during the tests.
		Cleanup: func(config interface{}) error {
			cfg := config.(b2.Config)
			be, err := b2.Open(cfg)
			if err != nil {
				return err
			}

			if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
				return err
			}

			return nil
		},
	}
}

func testVars(t testing.TB) {
	vars := []string{
		"RESTIC_TEST_B2_ACCOUNT_ID",
		"RESTIC_TEST_B2_ACCOUNT_KEY",
		"RESTIC_TEST_B2_REPOSITORY",
	}

	for _, v := range vars {
		if os.Getenv(v) == "" {
			t.Skipf("environment variable %v not set", v)
			return
		}
	}
}

func TestBackendB2(t *testing.T) {
	defer func() {
		if t.Skipped() {
			SkipDisallowed(t, "restic/backend/b2.TestBackendB2")
		}
	}()

	testVars(t)
	newB2TestSuite(t).RunTests(t)
}

func BenchmarkBackendb2(t *testing.B) {
	testVars(t)
	newB2TestSuite(t).RunBenchmarks(t)
}
src/restic/backend/b2/config.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package b2

import (
	"path"
	"regexp"
	"strings"

	"restic/errors"
	"restic/options"
)

// Config contains all configuration necessary to connect to a B2 compatible
// server.
type Config struct {
	AccountID string
	Key       string
	Bucket    string
	Prefix    string

	Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}

// NewConfig returns a new config with default options applied.
func NewConfig() Config {
	return Config{
		Connections: 5,
	}
}

func init() {
	options.Register("b2", Config{})
}

var bucketName = regexp.MustCompile("^[a-zA-Z0-9-]+$")

// checkBucketName tests the bucket name against the rules at
// https://help.backblaze.com/hc/en-us/articles/217666908-What-you-need-to-know-about-B2-Bucket-names
func checkBucketName(name string) error {
	if name == "" {
		return errors.New("bucket name is empty")
	}

	if len(name) < 6 {
		return errors.New("bucket name is too short")
	}

	if len(name) > 50 {
		return errors.New("bucket name is too long")
	}

	if !bucketName.MatchString(name) {
		return errors.New("bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)")
	}

	return nil
}

// ParseConfig parses the string s and extracts the b2 config. The supported
// configuration format is b2:bucketname[:prefix]. If no prefix is given, the
// prefix "restic" will be used.
func ParseConfig(s string) (interface{}, error) {
	if !strings.HasPrefix(s, "b2:") {
		return nil, errors.New("invalid format, want: b2:bucket-name[:path]")
	}

	s = s[3:]
	data := strings.SplitN(s, ":", 2)
	if len(data) == 0 || len(data[0]) == 0 {
		return nil, errors.New("bucket name not found")
	}

	cfg := NewConfig()
	cfg.Bucket = data[0]

	if err := checkBucketName(cfg.Bucket); err != nil {
		return nil, err
	}

	if len(data) == 2 {
		p := data[1]
		if len(p) > 0 {
			p = path.Clean(p)
		}

		if len(p) > 0 && path.IsAbs(p) {
			p = p[1:]
		}

		cfg.Prefix = p
	}

	return cfg, nil
}
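For illustration, a small sketch of how a caller sees ParseConfig (assuming the repository-internal import path restic/backend/b2 used by the tests); it mirrors the cases exercised in config_test.go below:

package main

import (
	"fmt"

	"restic/backend/b2"
)

func main() {
	// A colon separates bucket and prefix; the prefix is cleaned and a
	// leading slash is stripped.
	cfg, err := b2.ParseConfig("b2:bucketname:/prefix/directory")
	if err != nil {
		panic(err)
	}

	// Bucket: "bucketname", Prefix: "prefix/directory", Connections: 5
	fmt.Printf("%#v\n", cfg.(b2.Config))
}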
src/restic/backend/b2/config_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package b2

import "testing"

var configTests = []struct {
	s   string
	cfg Config
}{
	{"b2:bucketname", Config{
		Bucket:      "bucketname",
		Prefix:      "",
		Connections: 5,
	}},
	{"b2:bucketname:", Config{
		Bucket:      "bucketname",
		Prefix:      "",
		Connections: 5,
	}},
	{"b2:bucketname:/prefix/directory", Config{
		Bucket:      "bucketname",
		Prefix:      "prefix/directory",
		Connections: 5,
	}},
	{"b2:foobar", Config{
		Bucket:      "foobar",
		Prefix:      "",
		Connections: 5,
	}},
	{"b2:foobar:", Config{
		Bucket:      "foobar",
		Prefix:      "",
		Connections: 5,
	}},
	{"b2:foobar:/", Config{
		Bucket:      "foobar",
		Prefix:      "",
		Connections: 5,
	}},
}

func TestParseConfig(t *testing.T) {
	for _, test := range configTests {
		t.Run("", func(t *testing.T) {
			cfg, err := ParseConfig(test.s)
			if err != nil {
				t.Fatalf("%s failed: %v", test.s, err)
			}

			if cfg != test.cfg {
				t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v",
					test.s, test.cfg, cfg)
			}
		})
	}
}

var invalidConfigTests = []struct {
	s   string
	err string
}{
	{
		"b2",
		"invalid format, want: b2:bucket-name[:path]",
	},
	{
		"b2:",
		"bucket name not found",
	},
	{
		"b2:bucket_name",
		"bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)",
	},
	{
		"b2:bucketname/prefix/directory/",
		"bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)",
	},
}

func TestInvalidConfig(t *testing.T) {
	for _, test := range invalidConfigTests {
		t.Run("", func(t *testing.T) {
			cfg, err := ParseConfig(test.s)
			if err == nil {
				t.Fatalf("expected error not found for invalid config: %v, cfg is:\n%#v", test.s, cfg)
			}

			if err.Error() != test.err {
				t.Fatalf("unexpected error found, want:\n %v\ngot:\n %v", test.err, err.Error())
			}
		})
	}
}
src/restic/backend/http_transport.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package backend

import (
	"net"
	"net/http"
	"restic/debug"
	"time"
)

// Transport returns a new http.RoundTripper with default settings applied.
func Transport() http.RoundTripper {
	// copied from net/http
	tr := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}

	// wrap in the debug round tripper
	return debug.RoundTripper(tr)
}
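A backend would typically hand this transport to an http.Client; a minimal sketch (the request URL is only a placeholder):

package main

import (
	"fmt"
	"net/http"

	"restic/backend"
)

func main() {
	// One shared client using the tuned transport (which already includes
	// the debug round tripper wrapped by Transport).
	client := &http.Client{Transport: backend.Transport()}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}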
src/restic/backend/layout.go (new file, 167 lines)
@@ -0,0 +1,167 @@
package backend

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"restic"
	"restic/debug"
	"restic/errors"
	"restic/fs"
)

// Layout computes paths for file name storage.
type Layout interface {
	Filename(restic.Handle) string
	Dirname(restic.Handle) string
	Basedir(restic.FileType) string
	Paths() []string
	Name() string
}

// Filesystem is the abstraction of a file system used for a backend.
type Filesystem interface {
	Join(...string) string
	ReadDir(string) ([]os.FileInfo, error)
	IsNotExist(error) bool
}

// ensure statically that *LocalFilesystem implements Filesystem.
var _ Filesystem = &LocalFilesystem{}

// LocalFilesystem implements Filesystem in a local path.
type LocalFilesystem struct {
}

// ReadDir returns all entries of a directory.
func (l *LocalFilesystem) ReadDir(dir string) ([]os.FileInfo, error) {
	f, err := fs.Open(dir)
	if err != nil {
		return nil, err
	}

	entries, err := f.Readdir(-1)
	if err != nil {
		return nil, errors.Wrap(err, "Readdir")
	}

	err = f.Close()
	if err != nil {
		return nil, errors.Wrap(err, "Close")
	}

	return entries, nil
}

// Join combines several path components to one.
func (l *LocalFilesystem) Join(paths ...string) string {
	return filepath.Join(paths...)
}

// IsNotExist returns true for errors that are caused by not existing files.
func (l *LocalFilesystem) IsNotExist(err error) bool {
	return os.IsNotExist(err)
}

var backendFilenameLength = len(restic.ID{}) * 2
var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength))

func hasBackendFile(fs Filesystem, dir string) (bool, error) {
	entries, err := fs.ReadDir(dir)
	if err != nil && fs.IsNotExist(errors.Cause(err)) {
		return false, nil
	}

	if err != nil {
		return false, errors.Wrap(err, "ReadDir")
	}

	for _, e := range entries {
		if backendFilename.MatchString(e.Name()) {
			return true, nil
		}
	}

	return false, nil
}

// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout
// cannot be detected automatically.
var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed")

// DetectLayout tries to find out which layout is used in a local (or sftp)
// filesystem at the given path. If repo is nil, an instance of LocalFilesystem
// is used.
func DetectLayout(repo Filesystem, dir string) (Layout, error) {
	debug.Log("detect layout at %v", dir)
	if repo == nil {
		repo = &LocalFilesystem{}
	}

	// key file in the "keys" dir (DefaultLayout)
	foundKeysFile, err := hasBackendFile(repo, repo.Join(dir, defaultLayoutPaths[restic.KeyFile]))
	if err != nil {
		return nil, err
	}

	// key file in the "key" dir (S3LegacyLayout)
	foundKeyFile, err := hasBackendFile(repo, repo.Join(dir, s3LayoutPaths[restic.KeyFile]))
	if err != nil {
		return nil, err
	}

	if foundKeysFile && !foundKeyFile {
		debug.Log("found default layout at %v", dir)
		return &DefaultLayout{
			Path: dir,
			Join: repo.Join,
		}, nil
	}

	if foundKeyFile && !foundKeysFile {
		debug.Log("found s3 layout at %v", dir)
		return &S3LegacyLayout{
			Path: dir,
			Join: repo.Join,
		}, nil
	}

	debug.Log("layout detection failed")
	return nil, ErrLayoutDetectionFailed
}

// ParseLayout parses the config string and returns a Layout. When layout is
// the empty string, DetectLayout is used. If that fails, defaultLayout is used.
func ParseLayout(repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) {
	debug.Log("parse layout string %q for backend at %v", layout, path)
	switch layout {
	case "default":
		l = &DefaultLayout{
			Path: path,
			Join: repo.Join,
		}
	case "s3legacy":
		l = &S3LegacyLayout{
			Path: path,
			Join: repo.Join,
		}
	case "":
		l, err = DetectLayout(repo, path)

		// use the default layout if auto detection failed
		if errors.Cause(err) == ErrLayoutDetectionFailed && defaultLayout != "" {
			debug.Log("error: %v, use default layout %v", err, defaultLayout)
			return ParseLayout(repo, defaultLayout, "", path)
		}

		if err != nil {
			return nil, err
		}
		debug.Log("layout detected: %v", l)
	default:
		return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout)
	}

	return l, nil
}
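A sketch of how a local-style backend could resolve its layout through ParseLayout (the repository path is made up; an empty layout string triggers auto-detection, with "default" as the fallback):

package main

import (
	"fmt"

	"restic/backend"
)

func main() {
	// An empty layout string asks DetectLayout to inspect the repository;
	// if detection fails, the "default" layout is used instead.
	l, err := backend.ParseLayout(&backend.LocalFilesystem{}, "", "default", "/srv/restic-repo")
	if err != nil {
		fmt.Println("could not determine layout:", err)
		return
	}

	fmt.Println("using layout:", l.Name())
}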
src/restic/backend/layout_default.go (new file, 63 lines)
@@ -0,0 +1,63 @@
package backend

import "restic"

// DefaultLayout implements the default layout for local and sftp backends, as
// described in the Design document. The `data` directory has one level of
// subdirs, two characters each (taken from the first two characters of the
// file name).
type DefaultLayout struct {
	Path string
	Join func(...string) string
}

var defaultLayoutPaths = map[restic.FileType]string{
	restic.DataFile:     "data",
	restic.SnapshotFile: "snapshots",
	restic.IndexFile:    "index",
	restic.LockFile:     "locks",
	restic.KeyFile:      "keys",
}

func (l *DefaultLayout) String() string {
	return "<DefaultLayout>"
}

// Name returns the name for this layout.
func (l *DefaultLayout) Name() string {
	return "default"
}

// Dirname returns the directory path for a given file type and name.
func (l *DefaultLayout) Dirname(h restic.Handle) string {
	p := defaultLayoutPaths[h.Type]

	if h.Type == restic.DataFile && len(h.Name) > 2 {
		p = l.Join(p, h.Name[:2]) + "/"
	}

	return l.Join(l.Path, p) + "/"
}

// Filename returns a path to a file, including its name.
func (l *DefaultLayout) Filename(h restic.Handle) string {
	name := h.Name
	if h.Type == restic.ConfigFile {
		return l.Join(l.Path, "config")
	}

	return l.Join(l.Dirname(h), name)
}

// Paths returns all directory names
func (l *DefaultLayout) Paths() (dirs []string) {
	for _, p := range defaultLayoutPaths {
		dirs = append(dirs, l.Join(l.Path, p))
	}
	return dirs
}

// Basedir returns the base dir name for type t.
func (l *DefaultLayout) Basedir(t restic.FileType) string {
	return l.Join(l.Path, defaultLayoutPaths[t])
}
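To make the resulting paths concrete, a small sketch (repository path and blob ID are made up) that prints where DefaultLayout places a data file and the config file:

package main

import (
	"fmt"
	"path"

	"restic"
	"restic/backend"
)

func main() {
	l := &backend.DefaultLayout{
		Path: "/srv/restic-repo", // made-up repository path
		Join: path.Join,
	}

	data := restic.Handle{Type: restic.DataFile, Name: "ab5864f2"} // shortened, made-up ID
	fmt.Println(l.Filename(data)) // /srv/restic-repo/data/ab/ab5864f2

	cfg := restic.Handle{Type: restic.ConfigFile}
	fmt.Println(l.Filename(cfg)) // /srv/restic-repo/config
}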
src/restic/backend/layout_rest.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package backend

import "restic"

// RESTLayout implements the default layout for the REST protocol.
type RESTLayout struct {
	URL  string
	Path string
	Join func(...string) string
}

var restLayoutPaths = defaultLayoutPaths

func (l *RESTLayout) String() string {
	return "<RESTLayout>"
}

// Name returns the name for this layout.
func (l *RESTLayout) Name() string {
	return "rest"
}

// Dirname returns the directory path for a given file type and name.
func (l *RESTLayout) Dirname(h restic.Handle) string {
	if h.Type == restic.ConfigFile {
		return l.URL + l.Join(l.Path, "/")
	}

	return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/"
}

// Filename returns a path to a file, including its name.
func (l *RESTLayout) Filename(h restic.Handle) string {
	name := h.Name

	if h.Type == restic.ConfigFile {
		name = "config"
	}

	return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name)
}

// Paths returns all directory names
func (l *RESTLayout) Paths() (dirs []string) {
	for _, p := range restLayoutPaths {
		dirs = append(dirs, l.URL+l.Join(l.Path, p))
	}
	return dirs
}

// Basedir returns the base dir name for files of type t.
func (l *RESTLayout) Basedir(t restic.FileType) string {
	return l.URL + l.Join(l.Path, restLayoutPaths[t])
}
src/restic/backend/layout_s3legacy.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package backend

import "restic"

// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as
// described in the Design document.
type S3LegacyLayout struct {
	URL  string
	Path string
	Join func(...string) string
}

var s3LayoutPaths = map[restic.FileType]string{
	restic.DataFile:     "data",
	restic.SnapshotFile: "snapshot",
	restic.IndexFile:    "index",
	restic.LockFile:     "lock",
	restic.KeyFile:      "key",
}

func (l *S3LegacyLayout) String() string {
	return "<S3LegacyLayout>"
}

// Name returns the name for this layout.
func (l *S3LegacyLayout) Name() string {
	return "s3legacy"
}

// join calls Join with the first empty elements removed.
func (l *S3LegacyLayout) join(url string, items ...string) string {
	for len(items) > 0 && items[0] == "" {
		items = items[1:]
	}

	path := l.Join(items...)
	if path == "" || path[0] != '/' {
		if url != "" && url[len(url)-1] != '/' {
			url += "/"
		}
	}

	return url + path
}

// Dirname returns the directory path for a given file type and name.
func (l *S3LegacyLayout) Dirname(h restic.Handle) string {
	if h.Type == restic.ConfigFile {
		return l.URL + l.Join(l.Path, "/")
	}

	return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/"
}

// Filename returns a path to a file, including its name.
func (l *S3LegacyLayout) Filename(h restic.Handle) string {
	name := h.Name

	if h.Type == restic.ConfigFile {
		name = "config"
	}

	return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name)
}

// Paths returns all directory names
func (l *S3LegacyLayout) Paths() (dirs []string) {
	for _, p := range s3LayoutPaths {
		dirs = append(dirs, l.Join(l.Path, p))
	}
	return dirs
}

// Basedir returns the base dir name for type t.
func (l *S3LegacyLayout) Basedir(t restic.FileType) string {
	return l.Join(l.Path, s3LayoutPaths[t])
}
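Compared to DefaultLayout, the legacy S3 layout uses singular directory names and no two-character fan-out below data/. A small sketch (ID and repository path are made up) showing the difference for the same handle:

package main

import (
	"fmt"
	"path"

	"restic"
	"restic/backend"
)

func main() {
	h := restic.Handle{Type: restic.DataFile, Name: "ab5864f2"} // made-up ID

	def := &backend.DefaultLayout{Path: "repo", Join: path.Join}
	old := &backend.S3LegacyLayout{Path: "repo", Join: path.Join}

	fmt.Println(def.Filename(h)) // repo/data/ab/ab5864f2
	fmt.Println(old.Filename(h)) // repo/data/ab5864f2
}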