mirror of
https://github.com/restic/restic.git
synced 2025-08-25 19:57:35 +00:00
Compare commits
498 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
eb33e564c9 | ||
![]() |
62680af734 | ||
![]() |
68460fd3d1 | ||
![]() |
9459328d8d | ||
![]() |
9d71990c26 | ||
![]() |
9219b6a0ef | ||
![]() |
9a0ec05145 | ||
![]() |
2a5b6af2e9 | ||
![]() |
51e4e5ef82 | ||
![]() |
e998314088 | ||
![]() |
be03c1a457 | ||
![]() |
95050117eb | ||
![]() |
21a3a41b69 | ||
![]() |
3f919f2371 | ||
![]() |
50b43fbac0 | ||
![]() |
f689e2638d | ||
![]() |
f9ef2b1e44 | ||
![]() |
b87a37f318 | ||
![]() |
647ebf352a | ||
![]() |
62d3ef4a93 | ||
![]() |
6c5fc32967 | ||
![]() |
ae13cf15c1 | ||
![]() |
1fe1ec40a2 | ||
![]() |
32f5ee6f4e | ||
![]() |
5bd8a6d7eb | ||
![]() |
4a33370072 | ||
![]() |
98fb56baa6 | ||
![]() |
f12bbd9229 | ||
![]() |
6adb629608 | ||
![]() |
25ff9fa893 | ||
![]() |
bdaec8fdb8 | ||
![]() |
55b440b520 | ||
![]() |
3f63b53090 | ||
![]() |
76253b2a20 | ||
![]() |
20e82d1fcf | ||
![]() |
96eada3d5f | ||
![]() |
b8f4267a36 | ||
![]() |
1f6883a05c | ||
![]() |
8154f6a77a | ||
![]() |
184b7616ba | ||
![]() |
67f237b4f3 | ||
![]() |
090f9d6237 | ||
![]() |
321cc35cde | ||
![]() |
a97915642c | ||
![]() |
c64d81063e | ||
![]() |
233b841ad9 | ||
![]() |
85860e6e97 | ||
![]() |
2dd6769429 | ||
![]() |
5d9b0d894e | ||
![]() |
d4bf7a3cb1 | ||
![]() |
24c8a33da9 | ||
![]() |
399f8e84a1 | ||
![]() |
6a436d731d | ||
![]() |
b2fcbc21cb | ||
![]() |
05e5e29a8c | ||
![]() |
f490288738 | ||
![]() |
51718ec561 | ||
![]() |
11eb88a2ea | ||
![]() |
bee3231ed4 | ||
![]() |
60d8066568 | ||
![]() |
08dea911bd | ||
![]() |
47206a6579 | ||
![]() |
594689db32 | ||
![]() |
5705326bb8 | ||
![]() |
978ebaac49 | ||
![]() |
307aeb6849 | ||
![]() |
9cd85d5956 | ||
![]() |
c9f506925c | ||
![]() |
e990d3d483 | ||
![]() |
7042190807 | ||
![]() |
c158741e2e | ||
![]() |
89fbd39e59 | ||
![]() |
1ce599d2ae | ||
![]() |
789fec3da7 | ||
![]() |
8a120c8800 | ||
![]() |
41395e83c5 | ||
![]() |
2a193195b0 | ||
![]() |
229c7b24a4 | ||
![]() |
b34ce57dd4 | ||
![]() |
1ce839228e | ||
![]() |
fb1170c1d6 | ||
![]() |
e457fe22bc | ||
![]() |
39299e36ef | ||
![]() |
0512f292e8 | ||
![]() |
3a93e28605 | ||
![]() |
6b82cce1bd | ||
![]() |
31e07cecbb | ||
![]() |
c181b51360 | ||
![]() |
ccd19b7e88 | ||
![]() |
b0987ff570 | ||
![]() |
eff3124f15 | ||
![]() |
43fa051546 | ||
![]() |
2293835242 | ||
![]() |
0fcb6c7f94 | ||
![]() |
325fa916b5 | ||
![]() |
170e495334 | ||
![]() |
9d44682e3e | ||
![]() |
4d43509423 | ||
![]() |
ea9ad77e05 | ||
![]() |
cc84884d2e | ||
![]() |
4a5ae2ba84 | ||
![]() |
1f1e50f49e | ||
![]() |
f3c3b0f377 | ||
![]() |
7e2be9e081 | ||
![]() |
cc3c218baf | ||
![]() |
c1578a2035 | ||
![]() |
678b983300 | ||
![]() |
1b3870dc43 | ||
![]() |
ef40aee2bd | ||
![]() |
3a32c4e59f | ||
![]() |
e703e89e9b | ||
![]() |
389f6ee74c | ||
![]() |
bbac74b172 | ||
![]() |
a280b7364e | ||
![]() |
825651a135 | ||
![]() |
e36d17a6f8 | ||
![]() |
068b115abc | ||
![]() |
41a5bf357f | ||
![]() |
f96896a9c0 | ||
![]() |
2ab9a3b9c3 | ||
![]() |
dbe2eef80c | ||
![]() |
6e7c6674ad | ||
![]() |
7fe830ee1e | ||
![]() |
a07c7166ba | ||
![]() |
d2f8f9de23 | ||
![]() |
d5fe5107c8 | ||
![]() |
f08ba1a005 | ||
![]() |
70fb554854 | ||
![]() |
8c02ebb029 | ||
![]() |
8dcb0c4a9d | ||
![]() |
74ca82a6f8 | ||
![]() |
9cbc2502c6 | ||
![]() |
93038ed8f4 | ||
![]() |
8da5a6649b | ||
![]() |
3888c21a27 | ||
![]() |
1257c2c075 | ||
![]() |
182b9796e4 | ||
![]() |
cbf87fbdb3 | ||
![]() |
b5511e8e4c | ||
![]() |
50e0d5e6b5 | ||
![]() |
705ad51bcc | ||
![]() |
13a8b5822f | ||
![]() |
3d3bb88745 | ||
![]() |
3a3cf608f5 | ||
![]() |
19ac12d95b | ||
![]() |
3325a7c862 | ||
![]() |
555be49a79 | ||
![]() |
9aa9e0d1ec | ||
![]() |
7d12c29286 | ||
![]() |
56836364a4 | ||
![]() |
4df77e9f26 | ||
![]() |
2545c84321 | ||
![]() |
191c47d30e | ||
![]() |
dd1ef13c1c | ||
![]() |
4d5ee987a7 | ||
![]() |
b2ed42cec4 | ||
![]() |
61042a77a4 | ||
![]() |
4e9e2c3229 | ||
![]() |
faec0ff816 | ||
![]() |
07d1f8047e | ||
![]() |
b2b0760eb0 | ||
![]() |
cf16239058 | ||
![]() |
1531eab746 | ||
![]() |
d54176ce5d | ||
![]() |
a9aff885d6 | ||
![]() |
bb20078641 | ||
![]() |
237f32c651 | ||
![]() |
74e4656850 | ||
![]() |
c37d587f81 | ||
![]() |
ffc6b3d887 | ||
![]() |
88c63a029c | ||
![]() |
0b908bb1fb | ||
![]() |
0372c7ef04 | ||
![]() |
9464c63550 | ||
![]() |
6ebf2dd235 | ||
![]() |
5f153109ba | ||
![]() |
2beaa74892 | ||
![]() |
55c21846b1 | ||
![]() |
0f80b6a137 | ||
![]() |
e14ccb1142 | ||
![]() |
609367195a | ||
![]() |
18eb1d3ab0 | ||
![]() |
32a6b66267 | ||
![]() |
f903db492c | ||
![]() |
25a0be7f26 | ||
![]() |
a27b7f1370 | ||
![]() |
fa361dbfbd | ||
![]() |
5260d38980 | ||
![]() |
2f7b4ceae1 | ||
![]() |
aea7538936 | ||
![]() |
49a6a4f5bf | ||
![]() |
237d00000e | ||
![]() |
0c727f6ad1 | ||
![]() |
4e7d3efad4 | ||
![]() |
17446da5fd | ||
![]() |
a3cee840d2 | ||
![]() |
c76f3a1e27 | ||
![]() |
1e9714088d | ||
![]() |
58e3f5955c | ||
![]() |
dd8d8b1ae0 | ||
![]() |
e1ac0f0e0c | ||
![]() |
a88d90b8e5 | ||
![]() |
2fcb3947df | ||
![]() |
eef0ee7a85 | ||
![]() |
f1b73c9301 | ||
![]() |
ac1dfc99bb | ||
![]() |
098de3554c | ||
![]() |
8812dcd56a | ||
![]() |
379282299a | ||
![]() |
0c796dbd9b | ||
![]() |
f9dded83b3 | ||
![]() |
88a10a368f | ||
![]() |
9a7056a479 | ||
![]() |
fc05e35a08 | ||
![]() |
f1c388c623 | ||
![]() |
12141afbad | ||
![]() |
fed33295c3 | ||
![]() |
b217f38ee7 | ||
![]() |
0c1240360d | ||
![]() |
ffca602315 | ||
![]() |
da419be43c | ||
![]() |
8f1ca8fabe | ||
![]() |
f74dad2afb | ||
![]() |
3e287afdbf | ||
![]() |
06894484a1 | ||
![]() |
6e5b42d5c4 | ||
![]() |
ed5b2c2c9b | ||
![]() |
13c32b0fbe | ||
![]() |
9747cef338 | ||
![]() |
8e913e6d3a | ||
![]() |
b93459cbb0 | ||
![]() |
692f81ede8 | ||
![]() |
7a268e4aba | ||
![]() |
4b3a0b4104 | ||
![]() |
cebce52c16 | ||
![]() |
675a49a95b | ||
![]() |
e2dba9f5c7 | ||
![]() |
06fd6b54d7 | ||
![]() |
419e6f26b1 | ||
![]() |
c3212ab6a6 | ||
![]() |
658aa4c0f7 | ||
![]() |
998cf5a7f8 | ||
![]() |
7eec91f841 | ||
![]() |
51dc80be5b | ||
![]() |
ddbc0c1b37 | ||
![]() |
ecbf8e055c | ||
![]() |
16ba237d8b | ||
![]() |
a466e945d9 | ||
![]() |
03b9764bce | ||
![]() |
22c9276719 | ||
![]() |
1e33b285c1 | ||
![]() |
c05f96e6b9 | ||
![]() |
94752b7ee2 | ||
![]() |
0058745881 | ||
![]() |
a719d10e22 | ||
![]() |
b0a01ae68a | ||
![]() |
472bf5184f | ||
![]() |
d1a5ec7839 | ||
![]() |
1514593f22 | ||
![]() |
5e4e268bdc | ||
![]() |
3252f60df5 | ||
![]() |
2fa8b96843 | ||
![]() |
7a01bd3b67 | ||
![]() |
319087c056 | ||
![]() |
6ed73ed408 | ||
![]() |
c832a492ac | ||
![]() |
e01baeabba | ||
![]() |
bfc9c6c971 | ||
![]() |
5773b86d02 | ||
![]() |
a013014c24 | ||
![]() |
f9850b79b5 | ||
![]() |
2f518b7241 | ||
![]() |
49be202cb0 | ||
![]() |
19ebc1b786 | ||
![]() |
23a122a901 | ||
![]() |
e77002f841 | ||
![]() |
d05f6211d1 | ||
![]() |
ee3c55ea3d | ||
![]() |
db046c0acc | ||
![]() |
3e6a26e2e9 | ||
![]() |
65c5e511a1 | ||
![]() |
6d10c655a0 | ||
![]() |
bb40e49e75 | ||
![]() |
fefe15d7a1 | ||
![]() |
78e5aa6d30 | ||
![]() |
1cb11ad8ad | ||
![]() |
90a663c94f | ||
![]() |
88a7231217 | ||
![]() |
c0627dc80d | ||
![]() |
e71367e6b9 | ||
![]() |
5aa37acdaa | ||
![]() |
9c64a95df8 | ||
![]() |
7c8dd61e8c | ||
![]() |
f6cc10578d | ||
![]() |
4ce87a7f64 | ||
![]() |
e17ee40a31 | ||
![]() |
1bd1f3008d | ||
![]() |
38dac78180 | ||
![]() |
bc2399fbd9 | ||
![]() |
1a9705fc95 | ||
![]() |
8c4caf09a8 | ||
![]() |
375189488c | ||
![]() |
903651c719 | ||
![]() |
118d599d0a | ||
![]() |
db459eda21 | ||
![]() |
a14a63cd29 | ||
![]() |
947f0c345e | ||
![]() |
d23a2e1925 | ||
![]() |
08ae708b3b | ||
![]() |
99a05d5ab2 | ||
![]() |
6557f36f61 | ||
![]() |
5f58797ba7 | ||
![]() |
9cef6b4c69 | ||
![]() |
8a78a042db | ||
![]() |
b491af2b57 | ||
![]() |
d747a9c401 | ||
![]() |
888c1ae63a | ||
![]() |
5eeb257c95 | ||
![]() |
ae6729cf89 | ||
![]() |
6abd494915 | ||
![]() |
7147a54ceb | ||
![]() |
8d971172c4 | ||
![]() |
37d0e323eb | ||
![]() |
face5bd7f7 | ||
![]() |
1daf928a77 | ||
![]() |
37bab08181 | ||
![]() |
6dc2324d2e | ||
![]() |
850cd9aace | ||
![]() |
b50ff04cf3 | ||
![]() |
c8641f4479 | ||
![]() |
ac7ac0cb97 | ||
![]() |
be8be3397c | ||
![]() |
db6b4f8912 | ||
![]() |
1f3f042f32 | ||
![]() |
0aaa4e6cbe | ||
![]() |
0bac935dac | ||
![]() |
306a29980a | ||
![]() |
1e6e9f9bd0 | ||
![]() |
f342db7666 | ||
![]() |
41cc320145 | ||
![]() |
cdb0fb9c06 | ||
![]() |
94cbc6392d | ||
![]() |
78a1757e5a | ||
![]() |
22562d2132 | ||
![]() |
51d823348d | ||
![]() |
831f593b87 | ||
![]() |
179e11c2ae | ||
![]() |
ebba233a3a | ||
![]() |
8479390d7c | ||
![]() |
756f43d5f9 | ||
![]() |
affd04c125 | ||
![]() |
f27750e270 | ||
![]() |
1dd873b706 | ||
![]() |
7a60d9e54f | ||
![]() |
3001dd8c2b | ||
![]() |
4503aea0df | ||
![]() |
09cddb8927 | ||
![]() |
913eab3361 | ||
![]() |
a9c7c12276 | ||
![]() |
85eef232e6 | ||
![]() |
07a44a88f2 | ||
![]() |
48e065d971 | ||
![]() |
a06d927dce | ||
![]() |
fd3ed9e2f4 | ||
![]() |
6042df075f | ||
![]() |
c934c99d41 | ||
![]() |
616926d2c1 | ||
![]() |
05abc6d6f5 | ||
![]() |
45244fdf68 | ||
![]() |
803640ba4b | ||
![]() |
8e1e3844aa | ||
![]() |
8b5ab5b59f | ||
![]() |
4703473ec5 | ||
![]() |
8bfc2519d7 | ||
![]() |
ba16904eed | ||
![]() |
2841a87cc6 | ||
![]() |
fab4a8a4d2 | ||
![]() |
3b24c15c3d | ||
![]() |
4304e01ca2 | ||
![]() |
6d6c04abef | ||
![]() |
49e32f3f8a | ||
![]() |
9412f37e50 | ||
![]() |
593eb710b4 | ||
![]() |
97274ecabd | ||
![]() |
74f7dd0b38 | ||
![]() |
21ad357c10 | ||
![]() |
087cf7e114 | ||
![]() |
e604939e72 | ||
![]() |
37aca6bec0 | ||
![]() |
bdcafbc11c | ||
![]() |
ba33e41068 | ||
![]() |
4661f45a8c | ||
![]() |
2091fc0dde | ||
![]() |
9238dcc81a | ||
![]() |
71537da4b0 | ||
![]() |
ce51d2f3c0 | ||
![]() |
403b7ca2be | ||
![]() |
309cf0586a | ||
![]() |
9f9e91eb0d | ||
![]() |
1f43003cc1 | ||
![]() |
8ce5f29758 | ||
![]() |
8afc117aa3 | ||
![]() |
cf1cc1fb72 | ||
![]() |
64233ca0a7 | ||
![]() |
ea59896bd6 | ||
![]() |
806a0cdce3 | ||
![]() |
faa83db9e4 | ||
![]() |
9358a5fb37 | ||
![]() |
71c9516b26 | ||
![]() |
17ac91fba3 | ||
![]() |
26a3c47c5c | ||
![]() |
7896e50301 | ||
![]() |
9584cbda90 | ||
![]() |
cacc48fc09 | ||
![]() |
7fbaca577b | ||
![]() |
1f9dd84d1e | ||
![]() |
c8ff5592b8 | ||
![]() |
f875a8843d | ||
![]() |
0ed5c20c57 | ||
![]() |
21edbdc3ac | ||
![]() |
220d937975 | ||
![]() |
5f13bbc118 | ||
![]() |
9672670756 | ||
![]() |
5ac24a9744 | ||
![]() |
99e247caa6 | ||
![]() |
0c705e07db | ||
![]() |
024d01d85b | ||
![]() |
0666fa11b8 | ||
![]() |
caa0e89114 | ||
![]() |
46a7072f3f | ||
![]() |
1f12915b0c | ||
![]() |
a0885d5d69 | ||
![]() |
5dccab701a | ||
![]() |
5069c9edd9 | ||
![]() |
1a584cb16e | ||
![]() |
84ede6ad7a | ||
![]() |
b7f03d01b8 | ||
![]() |
eaceaca113 | ||
![]() |
a7ac9a4769 | ||
![]() |
23ed03a267 | ||
![]() |
aac8c5a7ca | ||
![]() |
7c8a401d97 | ||
![]() |
d83332315c | ||
![]() |
c2703e5024 | ||
![]() |
eb7dbc88b5 | ||
![]() |
e02aadf1d2 | ||
![]() |
c4e6b198ae | ||
![]() |
f47c8eebb7 | ||
![]() |
996e2ac7c5 | ||
![]() |
a67d3781a3 | ||
![]() |
153a73ebba | ||
![]() |
f9d6e3a035 | ||
![]() |
51656e8764 | ||
![]() |
9e23200bff | ||
![]() |
b884643b40 | ||
![]() |
5be4845710 | ||
![]() |
cfa3c6abc5 | ||
![]() |
f499e66032 | ||
![]() |
00575ecffe | ||
![]() |
cb5694d136 | ||
![]() |
100b06d806 | ||
![]() |
667536cea4 | ||
![]() |
ba183c44c3 | ||
![]() |
32e6a438be | ||
![]() |
b77b0749fa | ||
![]() |
6aca7dac21 | ||
![]() |
8161605f1b | ||
![]() |
b78607c9d8 | ||
![]() |
c1101ede19 | ||
![]() |
f646406822 | ||
![]() |
9888443f5c | ||
![]() |
cf6dfd6d36 | ||
![]() |
a7786c67f1 | ||
![]() |
15b7d9c80b | ||
![]() |
ee4128281e | ||
![]() |
3c8aefa0cb | ||
![]() |
e2df73b0ac | ||
![]() |
f4329a20f6 | ||
![]() |
11ebc0c5db | ||
![]() |
f137be42fe | ||
![]() |
0ce182f044 | ||
![]() |
1b50faf03e | ||
![]() |
4cbbf5d952 | ||
![]() |
2885db7902 | ||
![]() |
acb40d2b94 | ||
![]() |
fa73b50b45 | ||
![]() |
2d700c3887 | ||
![]() |
91251f2d57 | ||
![]() |
3df4ec7c61 | ||
![]() |
b1d3a1a5e3 | ||
![]() |
4f31c2699d | ||
![]() |
96b1ff5e38 | ||
![]() |
6b5ffce9dc | ||
![]() |
590eb9efd7 | ||
![]() |
55c4ca66f7 | ||
![]() |
56ad761b19 | ||
![]() |
d129baba7a | ||
![]() |
febb32b5b4 |
@@ -1,12 +0,0 @@
|
|||||||
# Folders
|
|
||||||
.git/
|
|
||||||
.github/
|
|
||||||
changelog/
|
|
||||||
doc/
|
|
||||||
docker/
|
|
||||||
helpers/
|
|
||||||
|
|
||||||
# Files
|
|
||||||
.gitignore
|
|
||||||
.golangci.yml
|
|
||||||
*.md
|
|
4
.github/dependabot.yml
vendored
4
.github/dependabot.yml
vendored
@@ -4,10 +4,10 @@ updates:
|
|||||||
- package-ecosystem: "gomod"
|
- package-ecosystem: "gomod"
|
||||||
directory: "/" # Location of package manifests
|
directory: "/" # Location of package manifests
|
||||||
schedule:
|
schedule:
|
||||||
interval: "weekly"
|
interval: "monthly"
|
||||||
|
|
||||||
# Dependencies listed in .github/workflows/*.yml
|
# Dependencies listed in .github/workflows/*.yml
|
||||||
- package-ecosystem: "github-actions"
|
- package-ecosystem: "github-actions"
|
||||||
directory: "/"
|
directory: "/"
|
||||||
schedule:
|
schedule:
|
||||||
interval: "weekly"
|
interval: "monthly"
|
||||||
|
59
.github/workflows/docker.yml
vendored
Normal file
59
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
|
||||||
|
name: Create and publish a Docker image
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*'
|
||||||
|
branches:
|
||||||
|
- 'master'
|
||||||
|
|
||||||
|
env:
|
||||||
|
REGISTRY: ghcr.io
|
||||||
|
IMAGE_NAME: ${{ github.repository }}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-and-push-image:
|
||||||
|
if: github.repository == 'restic/restic'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
|
||||||
|
with:
|
||||||
|
registry: ${{ env.REGISTRY }}
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Extract metadata (tags, labels) for Docker
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
|
||||||
|
with:
|
||||||
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
tags: |
|
||||||
|
type=ref,event=branch
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c
|
||||||
|
|
||||||
|
- name: Build and push Docker image
|
||||||
|
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
context: .
|
||||||
|
file: docker/Dockerfile.release
|
||||||
|
platforms: linux/386,linux/amd64,linux/arm,linux/arm64
|
||||||
|
pull: true
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
89
.github/workflows/tests.yml
vendored
89
.github/workflows/tests.yml
vendored
@@ -7,9 +7,13 @@ on:
|
|||||||
|
|
||||||
# run tests for all pull requests
|
# run tests for all pull requests
|
||||||
pull_request:
|
pull_request:
|
||||||
|
merge_group:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
env:
|
env:
|
||||||
latest_go: "1.19.x"
|
latest_go: "1.20.x"
|
||||||
GO111MODULE: on
|
GO111MODULE: on
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -19,27 +23,32 @@ jobs:
|
|||||||
# list of jobs to run:
|
# list of jobs to run:
|
||||||
include:
|
include:
|
||||||
- job_name: Windows
|
- job_name: Windows
|
||||||
go: 1.19.x
|
go: 1.20.x
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
|
|
||||||
- job_name: macOS
|
- job_name: macOS
|
||||||
go: 1.19.x
|
go: 1.20.x
|
||||||
os: macOS-latest
|
os: macOS-latest
|
||||||
test_fuse: false
|
test_fuse: false
|
||||||
|
|
||||||
- job_name: Linux
|
- job_name: Linux
|
||||||
go: 1.19.x
|
go: 1.20.x
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
test_cloud_backends: true
|
test_cloud_backends: true
|
||||||
test_fuse: true
|
test_fuse: true
|
||||||
check_changelog: true
|
check_changelog: true
|
||||||
|
|
||||||
- job_name: Linux (race)
|
- job_name: Linux (race)
|
||||||
go: 1.19.x
|
go: 1.20.x
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
test_fuse: true
|
test_fuse: true
|
||||||
test_opts: "-race"
|
test_opts: "-race"
|
||||||
|
|
||||||
|
- job_name: Linux
|
||||||
|
go: 1.19.x
|
||||||
|
os: ubuntu-latest
|
||||||
|
test_fuse: true
|
||||||
|
|
||||||
- job_name: Linux
|
- job_name: Linux
|
||||||
go: 1.18.x
|
go: 1.18.x
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
@@ -53,7 +62,7 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go ${{ matrix.go }}
|
- name: Set up Go ${{ matrix.go }}
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
|
|
||||||
@@ -132,6 +141,14 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
go run build.go
|
go run build.go
|
||||||
|
|
||||||
|
- name: Minimal test
|
||||||
|
run: |
|
||||||
|
./restic init
|
||||||
|
./restic backup .
|
||||||
|
env:
|
||||||
|
RESTIC_REPOSITORY: ../testrepo
|
||||||
|
RESTIC_PASSWORD: password
|
||||||
|
|
||||||
- name: Run local Tests
|
- name: Run local Tests
|
||||||
env:
|
env:
|
||||||
RESTIC_TEST_FUSE: ${{ matrix.test_fuse }}
|
RESTIC_TEST_FUSE: ${{ matrix.test_fuse }}
|
||||||
@@ -179,7 +196,7 @@ jobs:
|
|||||||
# own repo, otherwise the secrets are not available
|
# own repo, otherwise the secrets are not available
|
||||||
# Skip for Dependabot pull requests as these are run without secrets
|
# Skip for Dependabot pull requests as these are run without secrets
|
||||||
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#responding-to-events
|
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#responding-to-events
|
||||||
if: (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
|
if: ((github.repository == 'restic/restic' && github.event_name == 'push') || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
|
||||||
|
|
||||||
- name: Check changelog files with calens
|
- name: Check changelog files with calens
|
||||||
run: |
|
run: |
|
||||||
@@ -193,56 +210,41 @@ jobs:
|
|||||||
cross_compile:
|
cross_compile:
|
||||||
strategy:
|
strategy:
|
||||||
|
|
||||||
# ATTENTION: the list of architectures must be in sync with helpers/build-release-binaries/main.go!
|
|
||||||
matrix:
|
matrix:
|
||||||
# run cross-compile in three batches parallel so the overall tests run faster
|
# run cross-compile in three batches parallel so the overall tests run faster
|
||||||
targets:
|
subset:
|
||||||
- "linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64le linux/mips linux/mipsle linux/mips64 linux/mips64le linux/s390x"
|
- "0/3"
|
||||||
|
- "1/3"
|
||||||
- "openbsd/386 openbsd/amd64 \
|
- "2/3"
|
||||||
freebsd/386 freebsd/amd64 freebsd/arm \
|
|
||||||
aix/ppc64 \
|
|
||||||
darwin/amd64 darwin/arm64"
|
|
||||||
|
|
||||||
- "netbsd/386 netbsd/amd64 \
|
|
||||||
windows/386 windows/amd64 \
|
|
||||||
solaris/amd64"
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
GOPROXY: https://proxy.golang.org
|
GOPROXY: https://proxy.golang.org
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
name: Cross Compile for ${{ matrix.targets }}
|
name: Cross Compile for subset ${{ matrix.subset }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go ${{ env.latest_go }}
|
- name: Set up Go ${{ env.latest_go }}
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ env.latest_go }}
|
go-version: ${{ env.latest_go }}
|
||||||
|
|
||||||
- name: Install gox
|
|
||||||
run: |
|
|
||||||
go install github.com/mitchellh/gox@latest
|
|
||||||
|
|
||||||
- name: Check out code
|
- name: Check out code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Cross-compile with gox for ${{ matrix.targets }}
|
- name: Cross-compile for subset ${{ matrix.subset }}
|
||||||
env:
|
|
||||||
GOFLAGS: "-trimpath"
|
|
||||||
GOX_ARCHS: "${{ matrix.targets }}"
|
|
||||||
run: |
|
run: |
|
||||||
mkdir build-output
|
mkdir build-output build-output-debug
|
||||||
gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}" ./cmd/restic
|
go run ./helpers/build-release-binaries/main.go -o build-output -s . --platform-subset ${{ matrix.subset }}
|
||||||
gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -tags debug -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}_debug" ./cmd/restic
|
go run ./helpers/build-release-binaries/main.go -o build-output-debug -s . --platform-subset ${{ matrix.subset }} --tags debug
|
||||||
|
|
||||||
lint:
|
lint:
|
||||||
name: lint
|
name: lint
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Set up Go ${{ env.latest_go }}
|
- name: Set up Go ${{ env.latest_go }}
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ env.latest_go }}
|
go-version: ${{ env.latest_go }}
|
||||||
|
|
||||||
@@ -253,9 +255,7 @@ jobs:
|
|||||||
uses: golangci/golangci-lint-action@v3
|
uses: golangci/golangci-lint-action@v3
|
||||||
with:
|
with:
|
||||||
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
|
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
|
||||||
version: v1.49
|
version: v1.52.2
|
||||||
# Optional: show only new issues if it's a pull request. The default value is `false`.
|
|
||||||
only-new-issues: true
|
|
||||||
args: --verbose --timeout 5m
|
args: --verbose --timeout 5m
|
||||||
|
|
||||||
# only run golangci-lint for pull requests, otherwise ALL hints get
|
# only run golangci-lint for pull requests, otherwise ALL hints get
|
||||||
@@ -269,6 +269,21 @@ jobs:
|
|||||||
go mod tidy
|
go mod tidy
|
||||||
git diff --exit-code go.mod go.sum
|
git diff --exit-code go.mod go.sum
|
||||||
|
|
||||||
|
analyze:
|
||||||
|
name: Analyze results
|
||||||
|
needs: [test, cross_compile, lint]
|
||||||
|
if: always()
|
||||||
|
|
||||||
|
permissions: # no need to access code
|
||||||
|
contents: none
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Decide whether the needed jobs succeeded or failed
|
||||||
|
uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe
|
||||||
|
with:
|
||||||
|
jobs: ${{ toJSON(needs) }}
|
||||||
|
|
||||||
docker:
|
docker:
|
||||||
name: docker
|
name: docker
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -301,7 +316,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
id: docker_build
|
id: docker_build
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
push: false
|
push: false
|
||||||
context: .
|
context: .
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
|||||||
/restic
|
/restic
|
||||||
|
/restic.exe
|
||||||
/.vagrant
|
/.vagrant
|
||||||
/.vscode
|
/.vscode
|
||||||
|
@@ -10,13 +10,10 @@ linters:
|
|||||||
# make sure all errors returned by functions are handled
|
# make sure all errors returned by functions are handled
|
||||||
- errcheck
|
- errcheck
|
||||||
|
|
||||||
# find unused code
|
|
||||||
- deadcode
|
|
||||||
|
|
||||||
# show how code can be simplified
|
# show how code can be simplified
|
||||||
- gosimple
|
- gosimple
|
||||||
|
|
||||||
# # make sure code is formatted
|
# make sure code is formatted
|
||||||
- gofmt
|
- gofmt
|
||||||
|
|
||||||
# examine code and report suspicious constructs, such as Printf calls whose
|
# examine code and report suspicious constructs, such as Printf calls whose
|
||||||
@@ -35,12 +32,6 @@ linters:
|
|||||||
# find unused variables, functions, structs, types, etc.
|
# find unused variables, functions, structs, types, etc.
|
||||||
- unused
|
- unused
|
||||||
|
|
||||||
# find unused struct fields
|
|
||||||
- structcheck
|
|
||||||
|
|
||||||
# find unused global variables
|
|
||||||
- varcheck
|
|
||||||
|
|
||||||
# parse and typecheck code
|
# parse and typecheck code
|
||||||
- typecheck
|
- typecheck
|
||||||
|
|
||||||
@@ -57,3 +48,6 @@ issues:
|
|||||||
- don't use ALL_CAPS in Go names; use CamelCase
|
- don't use ALL_CAPS in Go names; use CamelCase
|
||||||
# revive: lots of packages don't have such a comment
|
# revive: lots of packages don't have such a comment
|
||||||
- "package-comments: should have a package comment"
|
- "package-comments: should have a package comment"
|
||||||
|
# staticcheck: there's no easy way to replace these packages
|
||||||
|
- "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated"
|
||||||
|
- "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated"
|
||||||
|
436
CHANGELOG.md
436
CHANGELOG.md
@@ -1,3 +1,439 @@
|
|||||||
|
Changelog for restic 0.16.0 (2023-07-31)
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
The following sections list the changes in restic 0.16.0 relevant to
|
||||||
|
restic users. The changes are ordered by importance.
|
||||||
|
|
||||||
|
Summary
|
||||||
|
-------
|
||||||
|
|
||||||
|
* Fix #2565: Support "unlimited" in `forget --keep-*` options
|
||||||
|
* Fix #3311: Support non-UTF8 paths as symlink target
|
||||||
|
* Fix #4199: Avoid lock refresh issues on slow network connections
|
||||||
|
* Fix #4274: Improve lock refresh handling after standby
|
||||||
|
* Fix #4319: Correctly clean up status bar output of the `backup` command
|
||||||
|
* Fix #4333: `generate` and `init` no longer silently ignore unexpected arguments
|
||||||
|
* Fix #4400: Ignore missing folders in `rest` backend
|
||||||
|
* Chg #4176: Fix JSON message type of `scan_finished` for the `backup` command
|
||||||
|
* Chg #4201: Require Go 1.20 for Solaris builds
|
||||||
|
* Enh #426: Show progress bar during restore
|
||||||
|
* Enh #719: Add `--retry-lock` option
|
||||||
|
* Enh #1495: Sort snapshots by timestamp in `restic find`
|
||||||
|
* Enh #1759: Add `repair index` and `repair snapshots` commands
|
||||||
|
* Enh #1926: Allow certificate paths to be passed through environment variables
|
||||||
|
* Enh #2359: Provide multi-platform Docker images
|
||||||
|
* Enh #2468: Add support for non-global Azure clouds
|
||||||
|
* Enh #2679: Reduce file fragmentation for local backend
|
||||||
|
* Enh #3328: Reduce memory usage by up to 25%
|
||||||
|
* Enh #3397: Improve accuracy of ETA displayed during backup
|
||||||
|
* Enh #3624: Keep oldest snapshot when there are not enough snapshots
|
||||||
|
* Enh #3698: Add support for Managed / Workload Identity to `azure` backend
|
||||||
|
* Enh #3871: Support `<snapshot>:<subfolder>` syntax to select subfolders
|
||||||
|
* Enh #3941: Support `--group-by` for backup parent selection
|
||||||
|
* Enh #4130: Cancel current command if cache becomes unusable
|
||||||
|
* Enh #4159: Add `--human-readable` option to `ls` and `find` commands
|
||||||
|
* Enh #4188: Include restic version in snapshot metadata
|
||||||
|
* Enh #4220: Add `jq` binary to Docker image
|
||||||
|
* Enh #4226: Allow specifying region of new buckets in the `gs` backend
|
||||||
|
* Enh #4375: Add support for extended attributes on symlinks
|
||||||
|
|
||||||
|
Details
|
||||||
|
-------
|
||||||
|
|
||||||
|
* Bugfix #2565: Support "unlimited" in `forget --keep-*` options
|
||||||
|
|
||||||
|
Restic would previously forget snapshots that should have been kept when a negative value was
|
||||||
|
passed to the `--keep-*` options. Negative values are now forbidden. To keep all snapshots,
|
||||||
|
the special value `unlimited` is now supported. For example, `--keep-monthly unlimited`
|
||||||
|
will keep all monthly snapshots.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2565
|
||||||
|
https://github.com/restic/restic/pull/4234
|
||||||
|
|
||||||
|
* Bugfix #3311: Support non-UTF8 paths as symlink target
|
||||||
|
|
||||||
|
Earlier restic versions did not correctly `backup` and `restore` symlinks that contain a
|
||||||
|
non-UTF8 target. Note that this only affected systems that still use a non-Unicode encoding
|
||||||
|
for filesystem paths.
|
||||||
|
|
||||||
|
The repository format is now extended to add support for such symlinks. Please note that
|
||||||
|
snapshots must have been created with at least restic version 0.16.0 for `restore` to
|
||||||
|
correctly handle non-UTF8 symlink targets when restoring them.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3311
|
||||||
|
https://github.com/restic/restic/pull/3802
|
||||||
|
|
||||||
|
* Bugfix #4199: Avoid lock refresh issues on slow network connections
|
||||||
|
|
||||||
|
On network connections with a low upload speed, backups and other operations could fail with
|
||||||
|
the error message `Fatal: failed to refresh lock in time`.
|
||||||
|
|
||||||
|
This has now been fixed by reworking the lock refresh handling.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4199
|
||||||
|
https://github.com/restic/restic/pull/4304
|
||||||
|
|
||||||
|
* Bugfix #4274: Improve lock refresh handling after standby
|
||||||
|
|
||||||
|
If the restic process was stopped or the host running restic entered standby during a long
|
||||||
|
running operation such as a backup, this previously resulted in the operation failing with
|
||||||
|
`Fatal: failed to refresh lock in time`.
|
||||||
|
|
||||||
|
This has now been fixed such that restic first checks whether it is safe to continue the current
|
||||||
|
operation and only throws an error if not.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4274
|
||||||
|
https://github.com/restic/restic/pull/4374
|
||||||
|
|
||||||
|
* Bugfix #4319: Correctly clean up status bar output of the `backup` command
|
||||||
|
|
||||||
|
Due to a regression in restic 0.15.2, the status bar of the `backup` command could leave some
|
||||||
|
output behind. This happened if filenames were printed that are wider than the current
|
||||||
|
terminal width. This has now been fixed.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4319
|
||||||
|
https://github.com/restic/restic/pull/4318
|
||||||
|
|
||||||
|
* Bugfix #4333: `generate` and `init` no longer silently ignore unexpected arguments
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4333
|
||||||
|
|
||||||
|
* Bugfix #4400: Ignore missing folders in `rest` backend
|
||||||
|
|
||||||
|
If a repository accessed via the REST backend was missing folders, then restic would fail with
|
||||||
|
an error while trying to list the data in the repository. This has been now fixed.
|
||||||
|
|
||||||
|
https://github.com/restic/rest-server/issues/235
|
||||||
|
https://github.com/restic/restic/pull/4400
|
||||||
|
|
||||||
|
* Change #4176: Fix JSON message type of `scan_finished` for the `backup` command
|
||||||
|
|
||||||
|
Restic incorrectly set the `message_type` of the `scan_finished` message to `status`
|
||||||
|
instead of `verbose_status`. This has now been corrected so that the messages report the
|
||||||
|
correct type.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4176
|
||||||
|
|
||||||
|
* Change #4201: Require Go 1.20 for Solaris builds
|
||||||
|
|
||||||
|
Building restic on Solaris now requires Go 1.20, as the library used to access Azure uses the
|
||||||
|
mmap syscall, which is only available on Solaris starting from Go 1.20. All other platforms
|
||||||
|
however continue to build with Go 1.18.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4201
|
||||||
|
|
||||||
|
* Enhancement #426: Show progress bar during restore
|
||||||
|
|
||||||
|
The `restore` command now shows a progress report while restoring files.
|
||||||
|
|
||||||
|
Example: `[0:42] 5.76% 23 files 12.98 MiB, total 3456 files 23.54 GiB`
|
||||||
|
|
||||||
|
JSON output is now also supported.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/426
|
||||||
|
https://github.com/restic/restic/issues/3413
|
||||||
|
https://github.com/restic/restic/issues/3627
|
||||||
|
https://github.com/restic/restic/pull/3991
|
||||||
|
https://github.com/restic/restic/pull/4314
|
||||||
|
https://forum.restic.net/t/progress-bar-for-restore/5210
|
||||||
|
|
||||||
|
* Enhancement #719: Add `--retry-lock` option
|
||||||
|
|
||||||
|
This option allows specifying a duration for which restic will wait if the repository is
|
||||||
|
already locked.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/719
|
||||||
|
https://github.com/restic/restic/pull/2214
|
||||||
|
https://github.com/restic/restic/pull/4107
|
||||||
|
|
||||||
|
* Enhancement #1495: Sort snapshots by timestamp in `restic find`
|
||||||
|
|
||||||
|
The `find` command used to print snapshots in an arbitrary order. Restic now prints snapshots
|
||||||
|
sorted by timestamp.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1495
|
||||||
|
https://github.com/restic/restic/pull/4409
|
||||||
|
|
||||||
|
* Enhancement #1759: Add `repair index` and `repair snapshots` commands
|
||||||
|
|
||||||
|
The `rebuild-index` command has been renamed to `repair index`. The old name will still work,
|
||||||
|
but is deprecated.
|
||||||
|
|
||||||
|
When a snapshot was damaged, the only option up to now was to completely forget the snapshot,
|
||||||
|
even if only some unimportant files in it were damaged and other files were still fine.
|
||||||
|
|
||||||
|
Restic now has a `repair snapshots` command, which can salvage any non-damaged files and parts
|
||||||
|
of files in the snapshots by removing damaged directories and missing file contents. Please
|
||||||
|
note that the damaged data may still be lost and see the "Troubleshooting" section in the
|
||||||
|
documentation for more details.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1759
|
||||||
|
https://github.com/restic/restic/issues/1714
|
||||||
|
https://github.com/restic/restic/issues/1798
|
||||||
|
https://github.com/restic/restic/issues/2334
|
||||||
|
https://github.com/restic/restic/pull/2876
|
||||||
|
https://forum.restic.net/t/corrupted-repo-how-to-repair/799
|
||||||
|
https://forum.restic.net/t/recovery-options-for-damaged-repositories/1571
|
||||||
|
|
||||||
|
* Enhancement #1926: Allow certificate paths to be passed through environment variables
|
||||||
|
|
||||||
|
Restic will now read paths to certificates from the environment variables `RESTIC_CACERT` or
|
||||||
|
`RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` are not specified.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1926
|
||||||
|
https://github.com/restic/restic/pull/4384
|
||||||
|
|
||||||
|
* Enhancement #2359: Provide multi-platform Docker images
|
||||||
|
|
||||||
|
The official Docker images are now built for the architectures linux/386, linux/amd64,
|
||||||
|
linux/arm and linux/arm64.
|
||||||
|
|
||||||
|
As an alternative to the Docker Hub, the Docker images are also available on ghcr.io, the GitHub
|
||||||
|
Container Registry.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2359
|
||||||
|
https://github.com/restic/restic/issues/4269
|
||||||
|
https://github.com/restic/restic/pull/4364
|
||||||
|
|
||||||
|
* Enhancement #2468: Add support for non-global Azure clouds
|
||||||
|
|
||||||
|
The `azure` backend previously only supported storages using the global domain
|
||||||
|
`core.windows.net`. This meant that backups to other domains such as Azure China
|
||||||
|
(`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not supported.
|
||||||
|
Restic now allows overriding the global domain using the environment variable
|
||||||
|
`AZURE_ENDPOINT_SUFFIX`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2468
|
||||||
|
https://github.com/restic/restic/pull/4387
|
||||||
|
|
||||||
|
* Enhancement #2679: Reduce file fragmentation for local backend
|
||||||
|
|
||||||
|
Before this change, local backend files could become fragmented. Now restic will try to
|
||||||
|
preallocate space for pack files to avoid their fragmentation.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2679
|
||||||
|
https://github.com/restic/restic/pull/3261
|
||||||
|
|
||||||
|
* Enhancement #3328: Reduce memory usage by up to 25%
|
||||||
|
|
||||||
|
The in-memory index has been optimized to be more garbage collection friendly. Restic now
|
||||||
|
defaults to `GOGC=50` to run the Go garbage collector more frequently.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3328
|
||||||
|
https://github.com/restic/restic/pull/4352
|
||||||
|
https://github.com/restic/restic/pull/4353
|
||||||
|
|
||||||
|
* Enhancement #3397: Improve accuracy of ETA displayed during backup
|
||||||
|
|
||||||
|
Restic's `backup` command displayed an ETA that did not adapt when the rate of progress made
|
||||||
|
during the backup changed during the course of the backup.
|
||||||
|
|
||||||
|
Restic now uses recent progress when computing the ETA. It is important to realize that the
|
||||||
|
estimate may still be wrong, because restic cannot predict the future, but the hope is that the
|
||||||
|
ETA will be more accurate in most cases.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3397
|
||||||
|
https://github.com/restic/restic/pull/3563
|
||||||
|
|
||||||
|
* Enhancement #3624: Keep oldest snapshot when there are not enough snapshots
|
||||||
|
|
||||||
|
The `forget` command now additionally preserves the oldest snapshot if fewer snapshots than
|
||||||
|
allowed by the `--keep-*` parameters would otherwise be kept. This maximizes the amount of
|
||||||
|
history kept within the specified limits.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3624
|
||||||
|
https://github.com/restic/restic/pull/4366
|
||||||
|
https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
|
||||||
|
|
||||||
|
* Enhancement #3698: Add support for Managed / Workload Identity to `azure` backend
|
||||||
|
|
||||||
|
Restic now additionally supports authenticating to Azure using Workload Identity or Managed
|
||||||
|
Identity credentials, which are automatically injected in several environments such as a
|
||||||
|
managed Kubernetes cluster.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3698
|
||||||
|
https://github.com/restic/restic/pull/4029
|
||||||
|
|
||||||
|
* Enhancement #3871: Support `<snapshot>:<subfolder>` syntax to select subfolders
|
||||||
|
|
||||||
|
Commands like `diff` or `restore` always worked with the full snapshot. This did not allow
|
||||||
|
comparing only a specific subfolder or only restoring that folder (`restore --include
|
||||||
|
subfolder` filters the restored files, but still creates the directories included in
|
||||||
|
`subfolder`).
|
||||||
|
|
||||||
|
The commands `diff`, `dump`, `ls` and `restore` now support the `<snapshot>:<subfolder>`
|
||||||
|
syntax, where `snapshot` is the ID of a snapshot (or the string `latest`) and `subfolder` is a
|
||||||
|
path within the snapshot. The commands will then only work with the specified path of the
|
||||||
|
snapshot. The `subfolder` must be a path to a folder as returned by `ls`. Two examples:
|
||||||
|
|
||||||
|
`restic restore -t target latest:/some/path` `restic diff 12345678:/some/path
|
||||||
|
90abcef:/some/path`
|
||||||
|
|
||||||
|
For debugging purposes, the `cat` command now supports `cat tree <snapshot>:<subfolder>` to
|
||||||
|
return the directory metadata for the given subfolder.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3871
|
||||||
|
https://github.com/restic/restic/pull/4334
|
||||||
|
|
||||||
|
* Enhancement #3941: Support `--group-by` for backup parent selection
|
||||||
|
|
||||||
|
Previously, the `backup` command by default selected the parent snapshot based on the
|
||||||
|
hostname and the backup targets. When the backup path list changed, the `backup` command was
|
||||||
|
unable to determine a suitable parent snapshot and had to read all files again.
|
||||||
|
|
||||||
|
The new `--group-by` option for the `backup` command allows filtering snapshots for the
|
||||||
|
parent selection by `host`, `paths` and `tags`. It defaults to `host,paths` which selects the
|
||||||
|
latest snapshot with hostname and paths matching those of the backup run. This matches the
|
||||||
|
behavior of prior restic versions.
|
||||||
|
|
||||||
|
The new `--group-by` option should be set to the same value as passed to `forget --group-by`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3941
|
||||||
|
https://github.com/restic/restic/pull/4081
|
||||||
|
|
||||||
|
* Enhancement #4130: Cancel current command if cache becomes unusable
|
||||||
|
|
||||||
|
If the cache directory was removed or ran out of space while restic was running, this would
|
||||||
|
previously cause further caching attempts to fail and thereby drastically slow down the
|
||||||
|
command execution. Now, the currently running command is instead canceled.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4130
|
||||||
|
https://github.com/restic/restic/pull/4166
|
||||||
|
|
||||||
|
* Enhancement #4159: Add `--human-readable` option to `ls` and `find` commands
|
||||||
|
|
||||||
|
Previously, when using the `-l` option with the `ls` and `find` commands, the displayed size
|
||||||
|
was always in bytes, without an option for a more human readable format such as MiB or GiB.
|
||||||
|
|
||||||
|
The new `--human-readable` option will convert longer size values into more human friendly
|
||||||
|
values with an appropriate suffix depending on the output size. For example, a size of
|
||||||
|
`14680064` will be shown as `14.000 MiB`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4159
|
||||||
|
https://github.com/restic/restic/pull/4351
|
||||||
|
|
||||||
|
* Enhancement #4188: Include restic version in snapshot metadata
|
||||||
|
|
||||||
|
The restic version used to backup a snapshot is now included in its metadata and shown when
|
||||||
|
inspecting a snapshot using `restic cat snapshot <snapshotID>` or `restic snapshots
|
||||||
|
--json`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4188
|
||||||
|
https://github.com/restic/restic/pull/4378
|
||||||
|
|
||||||
|
* Enhancement #4220: Add `jq` binary to Docker image
|
||||||
|
|
||||||
|
The Docker image now contains `jq`, which can be useful to process JSON data output by restic.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4220
|
||||||
|
|
||||||
|
* Enhancement #4226: Allow specifying region of new buckets in the `gs` backend
|
||||||
|
|
||||||
|
Previously, buckets used by the Google Cloud Storage backend would always get created in the
|
||||||
|
"us" region. It is now possible to specify the region where a bucket should be created by using
|
||||||
|
the `-o gs.region=us` option.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4226
|
||||||
|
|
||||||
|
* Enhancement #4375: Add support for extended attributes on symlinks
|
||||||
|
|
||||||
|
Restic now supports extended attributes on symlinks when backing up, restoring, or
|
||||||
|
FUSE-mounting snapshots. This includes, for example, the `security.selinux` xattr on Linux
|
||||||
|
distributions that use SELinux.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4375
|
||||||
|
https://github.com/restic/restic/pull/4379
|
||||||
|
|
||||||
|
|
||||||
|
Changelog for restic 0.15.2 (2023-04-24)
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
The following sections list the changes in restic 0.15.2 relevant to
|
||||||
|
restic users. The changes are ordered by importance.
|
||||||
|
|
||||||
|
Summary
|
||||||
|
-------
|
||||||
|
|
||||||
|
* Sec #4275: Update golang.org/x/net to address CVE-2022-41723
|
||||||
|
* Fix #2260: Sanitize filenames printed by `backup` during processing
|
||||||
|
* Fix #4211: Make `dump` interpret `--host` and `--path` correctly
|
||||||
|
* Fix #4239: Correct number of blocks reported in mount point
|
||||||
|
* Fix #4253: Minimize risk of spurious filesystem loops with `mount`
|
||||||
|
* Enh #4180: Add release binaries for riscv64 architecture on Linux
|
||||||
|
* Enh #4219: Upgrade Minio to version 7.0.49
|
||||||
|
|
||||||
|
Details
|
||||||
|
-------
|
||||||
|
|
||||||
|
* Security #4275: Update golang.org/x/net to address CVE-2022-41723
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4275
|
||||||
|
https://github.com/restic/restic/pull/4213
|
||||||
|
|
||||||
|
* Bugfix #2260: Sanitize filenames printed by `backup` during processing
|
||||||
|
|
||||||
|
The `backup` command would previously not sanitize the filenames it printed during
|
||||||
|
processing, potentially causing newlines or terminal control characters to mangle the
|
||||||
|
status output or even change the state of a terminal.
|
||||||
|
|
||||||
|
Filenames are now checked and quoted if they contain non-printable or non-Unicode
|
||||||
|
characters.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2260
|
||||||
|
https://github.com/restic/restic/issues/4191
|
||||||
|
https://github.com/restic/restic/pull/4192
|
||||||
|
|
||||||
|
* Bugfix #4211: Make `dump` interpret `--host` and `--path` correctly
|
||||||
|
|
||||||
|
A regression in restic 0.15.0 caused `dump` to confuse its `--host=<host>` and
|
||||||
|
`--path=<path>` options: it looked for snapshots with paths called `<host>` from hosts
|
||||||
|
called `<path>`. It now treats the options as intended.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4211
|
||||||
|
https://github.com/restic/restic/pull/4212
|
||||||
|
|
||||||
|
* Bugfix #4239: Correct number of blocks reported in mount point
|
||||||
|
|
||||||
|
Restic mount points reported an incorrect number of 512-byte (POSIX standard) blocks for
|
||||||
|
files and links due to a rounding bug. In particular, empty files were reported as taking one
|
||||||
|
block instead of zero.
|
||||||
|
|
||||||
|
The rounding is now fixed: the number of blocks reported is the file size (or link target size)
|
||||||
|
divided by 512 and rounded up to a whole number.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4239
|
||||||
|
https://github.com/restic/restic/pull/4240
|
||||||
|
|
||||||
|
* Bugfix #4253: Minimize risk of spurious filesystem loops with `mount`
|
||||||
|
|
||||||
|
When a backup contains a directory that has the same name as its parent, say `a/b/b`, and the GNU
|
||||||
|
`find` command was run on this backup in a restic mount, `find` would refuse to traverse the
|
||||||
|
lowest `b` directory, instead printing `File system loop detected`. This was due to the way the
|
||||||
|
restic mount command generates inode numbers for directories in the mount point.
|
||||||
|
|
||||||
|
The rule for generating these inode numbers was changed in 0.15.0. It has now been changed again
|
||||||
|
to avoid this issue. A perfect rule does not exist, but the probability of this behavior
|
||||||
|
occurring is now extremely small.
|
||||||
|
|
||||||
|
When it does occur, the mount point is not broken, and scripts that traverse the mount point
|
||||||
|
should work as long as they don't rely on inode numbers for detecting filesystem loops.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4253
|
||||||
|
https://github.com/restic/restic/pull/4255
|
||||||
|
|
||||||
|
* Enhancement #4180: Add release binaries for riscv64 architecture on Linux
|
||||||
|
|
||||||
|
Builds for the `riscv64` architecture on Linux are now included in the release binaries.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4180
|
||||||
|
|
||||||
|
* Enhancement #4219: Upgrade Minio to version 7.0.49
|
||||||
|
|
||||||
|
The upgraded version now allows use of the `ap-southeast-4` region (Melbourne).
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4219
|
||||||
|
|
||||||
|
|
||||||
Changelog for restic 0.15.1 (2023-01-30)
|
Changelog for restic 0.15.1 (2023-01-30)
|
||||||
=======================================
|
=======================================
|
||||||
|
|
||||||
|
@@ -58,6 +58,19 @@ Please be aware that the debug log file will contain potentially sensitive
|
|||||||
things like file and directory names, so please either redact it before
|
things like file and directory names, so please either redact it before
|
||||||
uploading it somewhere or post only the parts that are really relevant.
|
uploading it somewhere or post only the parts that are really relevant.
|
||||||
|
|
||||||
|
If restic gets stuck, please also include a stacktrace in the description.
|
||||||
|
On non-Windows systems, you can send a SIGQUIT signal to restic or press
|
||||||
|
`Ctrl-\` to achieve the same result. This causes restic to print a stacktrace
|
||||||
|
and then exit immediatelly. This will not damage your repository, however,
|
||||||
|
it might be necessary to manually clean up stale lock files using
|
||||||
|
`restic unlock`.
|
||||||
|
|
||||||
|
On Windows, please set the environment variable `RESTIC_DEBUG_STACKTRACE_SIGINT`
|
||||||
|
to `true` and press `Ctrl-C` to create a stacktrace.
|
||||||
|
|
||||||
|
If you think restic uses too much memory or a too large cache directory, then
|
||||||
|
please include the output of `restic stats --mode debug`.
|
||||||
|
|
||||||
|
|
||||||
Development Environment
|
Development Environment
|
||||||
=======================
|
=======================
|
||||||
@@ -78,10 +91,40 @@ Then use the `go` tool to build restic:
|
|||||||
$ ./restic version
|
$ ./restic version
|
||||||
restic 0.14.0-dev (compiled manually) compiled with go1.19 on linux/amd64
|
restic 0.14.0-dev (compiled manually) compiled with go1.19 on linux/amd64
|
||||||
|
|
||||||
|
To create a debug build use:
|
||||||
|
|
||||||
|
$ go build -tags debug ./cmd/restic
|
||||||
|
|
||||||
You can run all tests with the following command:
|
You can run all tests with the following command:
|
||||||
|
|
||||||
$ go test ./...
|
$ go test ./...
|
||||||
|
|
||||||
|
|
||||||
|
Performance and Memory Usage Issues
|
||||||
|
===================================
|
||||||
|
|
||||||
|
Debug builds of restic support the `--block-profile`, `--cpu-profile`,
|
||||||
|
`--mem-profile`, and `--trace-profile` options which collect performance data
|
||||||
|
that later on can be analyzed using the go tools:
|
||||||
|
|
||||||
|
$ restic --cpu-profile . [...]
|
||||||
|
$ go tool pprof -http localhost:12345 cpu.pprof
|
||||||
|
|
||||||
|
To analyze a trace profile use `go tool trace -http=localhost:12345 trace.out`.
|
||||||
|
|
||||||
|
As the memory usage of restic changes over time, it may be useful to capture a
|
||||||
|
snapshot of the current heap. This is possible using then `--listen-profile`
|
||||||
|
option. Then while restic runs you can query and afterwards analyze the heap statistics.
|
||||||
|
|
||||||
|
$ restic --listen-profile localhost:12345 [...]
|
||||||
|
$ curl http://localhost:12345/debug/pprof/heap -o heap.pprof
|
||||||
|
$ go tool pprof -http localhost:12345 heap.pprof
|
||||||
|
|
||||||
|
Further useful tools are setting the environment variable `GODEBUG=gctrace=1`,
|
||||||
|
which provides information about garbage collector runs. For a graphical variant
|
||||||
|
combine this with gcvis.
|
||||||
|
|
||||||
|
|
||||||
Providing Patches
|
Providing Patches
|
||||||
=================
|
=================
|
||||||
|
|
||||||
|
6
build.go
6
build.go
@@ -380,6 +380,12 @@ func main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
solarisMinVersion := GoVersion{Major: 1, Minor: 20, Patch: 0}
|
||||||
|
if env["GOARCH"] == "solaris" && !goVersion.AtLeast(solarisMinVersion) {
|
||||||
|
fmt.Fprintf(os.Stderr, "Detected version %s is too old, restic requires at least %s for Solaris\n", goVersion, solarisMinVersion)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
verbosePrintf("detected Go version %v\n", goVersion)
|
verbosePrintf("detected Go version %v\n", goVersion)
|
||||||
|
|
||||||
preserveSymbols := false
|
preserveSymbols := false
|
||||||
|
12
changelog/0.15.2_2023-04-24/issue-2260
Normal file
12
changelog/0.15.2_2023-04-24/issue-2260
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
Bugfix: Sanitize filenames printed by `backup` during processing
|
||||||
|
|
||||||
|
The `backup` command would previously not sanitize the filenames it printed
|
||||||
|
during processing, potentially causing newlines or terminal control characters
|
||||||
|
to mangle the status output or even change the state of a terminal.
|
||||||
|
|
||||||
|
Filenames are now checked and quoted if they contain non-printable or
|
||||||
|
non-Unicode characters.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2260
|
||||||
|
https://github.com/restic/restic/issues/4191
|
||||||
|
https://github.com/restic/restic/pull/4192
|
8
changelog/0.15.2_2023-04-24/issue-4211
Normal file
8
changelog/0.15.2_2023-04-24/issue-4211
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Bugfix: Make `dump` interpret `--host` and `--path` correctly
|
||||||
|
|
||||||
|
A regression in restic 0.15.0 caused `dump` to confuse its `--host=<host>` and
|
||||||
|
`--path=<path>` options: it looked for snapshots with paths called `<host>`
|
||||||
|
from hosts called `<path>`. It now treats the options as intended.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4211
|
||||||
|
https://github.com/restic/restic/pull/4212
|
11
changelog/0.15.2_2023-04-24/issue-4239
Normal file
11
changelog/0.15.2_2023-04-24/issue-4239
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
Bugfix: Correct number of blocks reported in mount point
|
||||||
|
|
||||||
|
Restic mount points reported an incorrect number of 512-byte (POSIX standard)
|
||||||
|
blocks for files and links due to a rounding bug. In particular, empty files
|
||||||
|
were reported as taking one block instead of zero.
|
||||||
|
|
||||||
|
The rounding is now fixed: the number of blocks reported is the file size
|
||||||
|
(or link target size) divided by 512 and rounded up to a whole number.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4239
|
||||||
|
https://github.com/restic/restic/pull/4240
|
18
changelog/0.15.2_2023-04-24/issue-4253
Normal file
18
changelog/0.15.2_2023-04-24/issue-4253
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
Bugfix: Minimize risk of spurious filesystem loops with `mount`
|
||||||
|
|
||||||
|
When a backup contains a directory that has the same name as its parent, say
|
||||||
|
`a/b/b`, and the GNU `find` command was run on this backup in a restic mount,
|
||||||
|
`find` would refuse to traverse the lowest `b` directory, instead printing
|
||||||
|
`File system loop detected`. This was due to the way the restic mount command
|
||||||
|
generates inode numbers for directories in the mount point.
|
||||||
|
|
||||||
|
The rule for generating these inode numbers was changed in 0.15.0. It has
|
||||||
|
now been changed again to avoid this issue. A perfect rule does not exist,
|
||||||
|
but the probability of this behavior occurring is now extremely small.
|
||||||
|
|
||||||
|
When it does occur, the mount point is not broken, and scripts that traverse
|
||||||
|
the mount point should work as long as they don't rely on inode numbers for
|
||||||
|
detecting filesystem loops.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4253
|
||||||
|
https://github.com/restic/restic/pull/4255
|
4
changelog/0.15.2_2023-04-24/issue-4275
Normal file
4
changelog/0.15.2_2023-04-24/issue-4275
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
Security: Update golang.org/x/net to address CVE-2022-41723
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4275
|
||||||
|
https://github.com/restic/restic/pull/4213
|
6
changelog/0.15.2_2023-04-24/pull-4180
Normal file
6
changelog/0.15.2_2023-04-24/pull-4180
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
Enhancement: Add release binaries for riscv64 architecture on Linux
|
||||||
|
|
||||||
|
Builds for the `riscv64` architecture on Linux are now included in the
|
||||||
|
release binaries.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4180
|
5
changelog/0.15.2_2023-04-24/pull-4219
Normal file
5
changelog/0.15.2_2023-04-24/pull-4219
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
Enhancement: Upgrade Minio to version 7.0.49
|
||||||
|
|
||||||
|
The upgraded version now allows use of the `ap-southeast-4` region (Melbourne).
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4219
|
7
changelog/0.16.0_2023-07-31/issue-1495
Normal file
7
changelog/0.16.0_2023-07-31/issue-1495
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Enhancement: Sort snapshots by timestamp in `restic find`
|
||||||
|
|
||||||
|
The `find` command used to print snapshots in an arbitrary order. Restic now
|
||||||
|
prints snapshots sorted by timestamp.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1495
|
||||||
|
https://github.com/restic/restic/pull/4409
|
21
changelog/0.16.0_2023-07-31/issue-1759
Normal file
21
changelog/0.16.0_2023-07-31/issue-1759
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
Enhancement: Add `repair index` and `repair snapshots` commands
|
||||||
|
|
||||||
|
The `rebuild-index` command has been renamed to `repair index`. The old name
|
||||||
|
will still work, but is deprecated.
|
||||||
|
|
||||||
|
When a snapshot was damaged, the only option up to now was to completely forget
|
||||||
|
the snapshot, even if only some unimportant files in it were damaged and other
|
||||||
|
files were still fine.
|
||||||
|
|
||||||
|
Restic now has a `repair snapshots` command, which can salvage any non-damaged
|
||||||
|
files and parts of files in the snapshots by removing damaged directories and
|
||||||
|
missing file contents. Please note that the damaged data may still be lost
|
||||||
|
and see the "Troubleshooting" section in the documentation for more details.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1759
|
||||||
|
https://github.com/restic/restic/issues/1714
|
||||||
|
https://github.com/restic/restic/issues/1798
|
||||||
|
https://github.com/restic/restic/issues/2334
|
||||||
|
https://github.com/restic/restic/pull/2876
|
||||||
|
https://forum.restic.net/t/corrupted-repo-how-to-repair/799
|
||||||
|
https://forum.restic.net/t/recovery-options-for-damaged-repositories/1571
|
8
changelog/0.16.0_2023-07-31/issue-1926
Normal file
8
changelog/0.16.0_2023-07-31/issue-1926
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Allow certificate paths to be passed through environment variables
|
||||||
|
|
||||||
|
Restic will now read paths to certificates from the environment variables
|
||||||
|
`RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert`
|
||||||
|
are not specified.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/1926
|
||||||
|
https://github.com/restic/restic/pull/4384
|
11
changelog/0.16.0_2023-07-31/issue-2359
Normal file
11
changelog/0.16.0_2023-07-31/issue-2359
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
Enhancement: Provide multi-platform Docker images
|
||||||
|
|
||||||
|
The official Docker images are now built for the architectures linux/386,
|
||||||
|
linux/amd64, linux/arm and linux/arm64.
|
||||||
|
|
||||||
|
As an alternative to the Docker Hub, the Docker images are also
|
||||||
|
available on ghcr.io, the GitHub Container Registry.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2359
|
||||||
|
https://github.com/restic/restic/issues/4269
|
||||||
|
https://github.com/restic/restic/pull/4364
|
10
changelog/0.16.0_2023-07-31/issue-2468
Normal file
10
changelog/0.16.0_2023-07-31/issue-2468
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
Enhancement: Add support for non-global Azure clouds
|
||||||
|
|
||||||
|
The `azure` backend previously only supported storages using the global domain
|
||||||
|
`core.windows.net`. This meant that backups to other domains such as Azure
|
||||||
|
China (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were
|
||||||
|
not supported. Restic now allows overriding the global domain using the
|
||||||
|
environment variable `AZURE_ENDPOINT_SUFFIX`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2468
|
||||||
|
https://github.com/restic/restic/pull/4387
|
10
changelog/0.16.0_2023-07-31/issue-2565
Normal file
10
changelog/0.16.0_2023-07-31/issue-2565
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
Bugfix: Support "unlimited" in `forget --keep-*` options
|
||||||
|
|
||||||
|
Restic would previously forget snapshots that should have been kept when a
|
||||||
|
negative value was passed to the `--keep-*` options. Negative values are now
|
||||||
|
forbidden. To keep all snapshots, the special value `unlimited` is now
|
||||||
|
supported. For example, `--keep-monthly unlimited` will keep all monthly
|
||||||
|
snapshots.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2565
|
||||||
|
https://github.com/restic/restic/pull/4234
|
12
changelog/0.16.0_2023-07-31/issue-3311
Normal file
12
changelog/0.16.0_2023-07-31/issue-3311
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
Bugfix: Support non-UTF8 paths as symlink target
|
||||||
|
|
||||||
|
Earlier restic versions did not correctly `backup` and `restore` symlinks that
|
||||||
|
contain a non-UTF8 target. Note that this only affected systems that still use
|
||||||
|
a non-Unicode encoding for filesystem paths.
|
||||||
|
|
||||||
|
The repository format is now extended to add support for such symlinks. Please
|
||||||
|
note that snapshots must have been created with at least restic version 0.16.0
|
||||||
|
for `restore` to correctly handle non-UTF8 symlink targets when restoring them.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3311
|
||||||
|
https://github.com/restic/restic/pull/3802
|
9
changelog/0.16.0_2023-07-31/issue-3328
Normal file
9
changelog/0.16.0_2023-07-31/issue-3328
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Enhancement: Reduce memory usage by up to 25%
|
||||||
|
|
||||||
|
The in-memory index has been optimized to be more garbage collection friendly.
|
||||||
|
Restic now defaults to `GOGC=50` to run the Go garbage collector more
|
||||||
|
frequently.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3328
|
||||||
|
https://github.com/restic/restic/pull/4352
|
||||||
|
https://github.com/restic/restic/pull/4353
|
11
changelog/0.16.0_2023-07-31/issue-3397
Normal file
11
changelog/0.16.0_2023-07-31/issue-3397
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
Enhancement: Improve accuracy of ETA displayed during backup
|
||||||
|
|
||||||
|
Restic's `backup` command displayed an ETA that did not adapt when the rate of
|
||||||
|
progress changed during the course of the backup.
|
||||||
|
|
||||||
|
Restic now uses recent progress when computing the ETA. It is important to
|
||||||
|
realize that the estimate may still be wrong, because restic cannot predict
|
||||||
|
the future, but the hope is that the ETA will be more accurate in most cases.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3397
|
||||||
|
https://github.com/restic/restic/pull/3563
|
9
changelog/0.16.0_2023-07-31/issue-3624
Normal file
9
changelog/0.16.0_2023-07-31/issue-3624
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Enhancement: Keep oldest snapshot when there are not enough snapshots
|
||||||
|
|
||||||
|
The `forget` command now additionally preserves the oldest snapshot if fewer
|
||||||
|
snapshots than allowed by the `--keep-*` parameters would otherwise be kept.
|
||||||
|
This maximizes the amount of history kept within the specified limits.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3624
|
||||||
|
https://github.com/restic/restic/pull/4366
|
||||||
|
https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
|
8
changelog/0.16.0_2023-07-31/issue-3698
Normal file
8
changelog/0.16.0_2023-07-31/issue-3698
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Add support for Managed / Workload Identity to `azure` backend
|
||||||
|
|
||||||
|
Restic now additionally supports authenticating to Azure using Workload
|
||||||
|
Identity or Managed Identity credentials, which are automatically injected in
|
||||||
|
several environments such as a managed Kubernetes cluster.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3698
|
||||||
|
https://github.com/restic/restic/pull/4029
|
22
changelog/0.16.0_2023-07-31/issue-3871
Normal file
22
changelog/0.16.0_2023-07-31/issue-3871
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
Enhancement: Support `<snapshot>:<subfolder>` syntax to select subfolders
|
||||||
|
|
||||||
|
Commands like `diff` or `restore` always worked with the full snapshot. This
|
||||||
|
did not allow comparing only a specific subfolder or only restoring that folder
|
||||||
|
(`restore --include subfolder` filters the restored files, but still creates the
|
||||||
|
directories included in `subfolder`).
|
||||||
|
|
||||||
|
The commands `diff`, `dump`, `ls` and `restore` now support the
|
||||||
|
`<snapshot>:<subfolder>` syntax, where `snapshot` is the ID of a snapshot (or
|
||||||
|
the string `latest`) and `subfolder` is a path within the snapshot. The
|
||||||
|
commands will then only work with the specified path of the snapshot. The
|
||||||
|
`subfolder` must be a path to a folder as returned by `ls`. Two examples:
|
||||||
|
|
||||||
|
`restic restore -t target latest:/some/path`
|
||||||
|
`restic diff 12345678:/some/path 90abcef:/some/path`
|
||||||
|
|
||||||
|
For debugging purposes, the `cat` command now supports `cat tree
|
||||||
|
<snapshot>:<subfolder>` to return the directory metadata for the given
|
||||||
|
subfolder.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3871
|
||||||
|
https://github.com/restic/restic/pull/4334
|
17
changelog/0.16.0_2023-07-31/issue-3941
Normal file
17
changelog/0.16.0_2023-07-31/issue-3941
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
Enhancement: Support `--group-by` for backup parent selection
|
||||||
|
|
||||||
|
Previously, the `backup` command by default selected the parent snapshot based
|
||||||
|
on the hostname and the backup targets. When the backup path list changed, the
|
||||||
|
`backup` command was unable to determine a suitable parent snapshot and had to
|
||||||
|
read all files again.
|
||||||
|
|
||||||
|
The new `--group-by` option for the `backup` command allows filtering snapshots
|
||||||
|
for the parent selection by `host`, `paths` and `tags`. It defaults to
|
||||||
|
`host,paths` which selects the latest snapshot with hostname and paths matching
|
||||||
|
those of the backup run. This matches the behavior of prior restic versions.
|
||||||
|
|
||||||
|
The new `--group-by` option should be set to the same value as passed to
|
||||||
|
`forget --group-by`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/3941
|
||||||
|
https://github.com/restic/restic/pull/4081
|
9
changelog/0.16.0_2023-07-31/issue-4130
Normal file
9
changelog/0.16.0_2023-07-31/issue-4130
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Enhancement: Cancel current command if cache becomes unusable
|
||||||
|
|
||||||
|
If the cache directory was removed or ran out of space while restic was
|
||||||
|
running, this would previously cause further caching attempts to fail and
|
||||||
|
thereby drastically slow down the command execution. Now, the currently running
|
||||||
|
command is instead canceled.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4130
|
||||||
|
https://github.com/restic/restic/pull/4166
|
12
changelog/0.16.0_2023-07-31/issue-4159
Normal file
12
changelog/0.16.0_2023-07-31/issue-4159
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
Enhancement: Add `--human-readable` option to `ls` and `find` commands
|
||||||
|
|
||||||
|
Previously, when using the `-l` option with the `ls` and `find` commands, the
|
||||||
|
displayed size was always in bytes, without an option for a more human readable
|
||||||
|
format such as MiB or GiB.
|
||||||
|
|
||||||
|
The new `--human-readable` option will convert longer size values into more
|
||||||
|
human friendly values with an appropriate suffix depending on the output size.
|
||||||
|
For example, a size of `14680064` will be shown as `14.000 MiB`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4159
|
||||||
|
https://github.com/restic/restic/pull/4351
|
8
changelog/0.16.0_2023-07-31/issue-4188
Normal file
8
changelog/0.16.0_2023-07-31/issue-4188
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Include restic version in snapshot metadata
|
||||||
|
|
||||||
|
The restic version used to backup a snapshot is now included in its metadata
|
||||||
|
and shown when inspecting a snapshot using `restic cat snapshot <snapshotID>`
|
||||||
|
or `restic snapshots --json`.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4188
|
||||||
|
https://github.com/restic/restic/pull/4378
|
9
changelog/0.16.0_2023-07-31/issue-4199
Normal file
9
changelog/0.16.0_2023-07-31/issue-4199
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Bugfix: Avoid lock refresh issues on slow network connections
|
||||||
|
|
||||||
|
On network connections with a low upload speed, backups and other operations
|
||||||
|
could fail with the error message `Fatal: failed to refresh lock in time`.
|
||||||
|
|
||||||
|
This has now been fixed by reworking the lock refresh handling.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4199
|
||||||
|
https://github.com/restic/restic/pull/4304
|
14
changelog/0.16.0_2023-07-31/issue-426
Normal file
14
changelog/0.16.0_2023-07-31/issue-426
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
Enhancement: Show progress bar during restore
|
||||||
|
|
||||||
|
The `restore` command now shows a progress report while restoring files.
|
||||||
|
|
||||||
|
Example: `[0:42] 5.76% 23 files 12.98 MiB, total 3456 files 23.54 GiB`
|
||||||
|
|
||||||
|
JSON output is now also supported.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/426
|
||||||
|
https://github.com/restic/restic/issues/3413
|
||||||
|
https://github.com/restic/restic/issues/3627
|
||||||
|
https://github.com/restic/restic/pull/3991
|
||||||
|
https://github.com/restic/restic/pull/4314
|
||||||
|
https://forum.restic.net/t/progress-bar-for-restore/5210
|
11
changelog/0.16.0_2023-07-31/issue-4274
Normal file
11
changelog/0.16.0_2023-07-31/issue-4274
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
Bugfix: Improve lock refresh handling after standby
|
||||||
|
|
||||||
|
If the restic process was stopped or the host running restic entered standby
|
||||||
|
during a long running operation such as a backup, this previously resulted in
|
||||||
|
the operation failing with `Fatal: failed to refresh lock in time`.
|
||||||
|
|
||||||
|
This has now been fixed such that restic first checks whether it is safe to
|
||||||
|
continue the current operation and only throws an error if not.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4274
|
||||||
|
https://github.com/restic/restic/pull/4374
|
8
changelog/0.16.0_2023-07-31/issue-4375
Normal file
8
changelog/0.16.0_2023-07-31/issue-4375
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Add support for extended attributes on symlinks
|
||||||
|
|
||||||
|
Restic now supports extended attributes on symlinks when backing up,
|
||||||
|
restoring, or FUSE-mounting snapshots. This includes, for example, the
|
||||||
|
`security.selinux` xattr on Linux distributions that use SELinux.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4375
|
||||||
|
https://github.com/restic/restic/pull/4379
|
8
changelog/0.16.0_2023-07-31/issue-719
Normal file
8
changelog/0.16.0_2023-07-31/issue-719
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Add `--retry-lock` option
|
||||||
|
|
||||||
|
This option allows specifying a duration for which restic will wait if the
|
||||||
|
repository is already locked.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/719
|
||||||
|
https://github.com/restic/restic/pull/2214
|
||||||
|
https://github.com/restic/restic/pull/4107
|
8
changelog/0.16.0_2023-07-31/pull-3261
Normal file
8
changelog/0.16.0_2023-07-31/pull-3261
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Enhancement: Reduce file fragmentation for local backend
|
||||||
|
|
||||||
|
Before this change, local backend files could become fragmented.
|
||||||
|
Now restic will try to preallocate space for pack files to avoid
|
||||||
|
their fragmentation.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/2679
|
||||||
|
https://github.com/restic/restic/pull/3261
|
7
changelog/0.16.0_2023-07-31/pull-4176
Normal file
7
changelog/0.16.0_2023-07-31/pull-4176
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Change: Fix JSON message type of `scan_finished` for the `backup` command
|
||||||
|
|
||||||
|
Restic incorrectly set the `message_type` of the `scan_finished` message to
|
||||||
|
`status` instead of `verbose_status`. This has now been corrected so that
|
||||||
|
the messages report the correct type.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4176
|
7
changelog/0.16.0_2023-07-31/pull-4201
Normal file
7
changelog/0.16.0_2023-07-31/pull-4201
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Change: Require Go 1.20 for Solaris builds
|
||||||
|
|
||||||
|
Building restic on Solaris now requires Go 1.20, as the library used to access
|
||||||
|
Azure uses the mmap syscall, which is only available on Solaris starting from
|
||||||
|
Go 1.20. All other platforms however continue to build with Go 1.18.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4201
|
6
changelog/0.16.0_2023-07-31/pull-4220
Normal file
6
changelog/0.16.0_2023-07-31/pull-4220
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
Enhancement: Add `jq` binary to Docker image
|
||||||
|
|
||||||
|
The Docker image now contains `jq`, which can be useful to process JSON data
|
||||||
|
output by restic.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4220
|
7
changelog/0.16.0_2023-07-31/pull-4226
Normal file
7
changelog/0.16.0_2023-07-31/pull-4226
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Enhancement: Allow specifying region of new buckets in the `gs` backend
|
||||||
|
|
||||||
|
Previously, buckets used by the Google Cloud Storage backend would always get
|
||||||
|
created in the "us" region. It is now possible to specify the region where a
|
||||||
|
bucket should be created by using the `-o gs.region=us` option.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4226
|
8
changelog/0.16.0_2023-07-31/pull-4318
Normal file
8
changelog/0.16.0_2023-07-31/pull-4318
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Bugfix: Correctly clean up status bar output of the `backup` command
|
||||||
|
|
||||||
|
Due to a regression in restic 0.15.2, the status bar of the `backup` command
|
||||||
|
could leave some output behind. This happened if filenames were printed that
|
||||||
|
were wider than the current terminal width. This has now been fixed.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/issues/4319
|
||||||
|
https://github.com/restic/restic/pull/4318
|
3
changelog/0.16.0_2023-07-31/pull-4333
Normal file
3
changelog/0.16.0_2023-07-31/pull-4333
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
Bugfix: `generate` and `init` no longer silently ignore unexpected arguments
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4333
|
8
changelog/0.16.0_2023-07-31/pull-4400
Normal file
8
changelog/0.16.0_2023-07-31/pull-4400
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Bugfix: Ignore missing folders in `rest` backend
|
||||||
|
|
||||||
|
If a repository accessed via the REST backend was missing folders, then restic
|
||||||
|
would fail with an error while trying to list the data in the repository. This
|
||||||
|
has now been fixed.
|
||||||
|
|
||||||
|
https://github.com/restic/restic/pull/4400
|
||||||
|
https://github.com/restic/rest-server/issues/235
|
@@ -1,16 +1,17 @@
|
|||||||
# The first line must start with Bugfix:, Enhancement: or Change:,
|
# The first line must start with Bugfix:, Enhancement: or Change:,
|
||||||
# including the colon. Use present tense. Remove lines starting with '#'
|
# including the colon. Use present tense and the imperative mood. Remove
|
||||||
# from this template.
|
# lines starting with '#' from this template.
|
||||||
Enhancement: Allow custom bar in the foo command
|
Enhancement: Allow custom bar in the foo command
|
||||||
|
|
||||||
# Describe the problem in the past tense, the new behavior in the present
|
# Describe the problem in the past tense, the new behavior in the present
|
||||||
# tense. Mention the affected commands, backends, operating systems, etc.
|
# tense. Mention the affected commands, backends, operating systems, etc.
|
||||||
# Focus on user-facing behavior, not the implementation.
|
# Focus on user-facing behavior, not the implementation.
|
||||||
|
# Use "Restic now ..." instead of "We have changed ...".
|
||||||
|
|
||||||
Restic foo always used the system-wide bar when deciding how to frob an
|
Restic foo always used the system-wide bar when deciding how to frob an
|
||||||
item in the baz backend. It now permits selecting the bar with --bar or
|
item in the `baz` backend. It now permits selecting the bar with `--bar`
|
||||||
the environment variable RESTIC_BAR. The system-wide bar is still the
|
or the environment variable `RESTIC_BAR`. The system-wide bar is still
|
||||||
default.
|
the default.
|
||||||
|
|
||||||
# The last section is a list of issue, PR and forum URLs.
|
# The last section is a list of issue, PR and forum URLs.
|
||||||
# The first issue ID determines the filename for the changelog entry:
|
# The first issue ID determines the filename for the changelog entry:
|
||||||
|
@@ -62,6 +62,12 @@ func CleanupHandler(c <-chan os.Signal) {
|
|||||||
debug.Log("signal %v received, cleaning up", s)
|
debug.Log("signal %v received, cleaning up", s)
|
||||||
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
|
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
|
||||||
|
|
||||||
|
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
|
||||||
|
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
|
||||||
|
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
|
||||||
|
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
|
||||||
|
}
|
||||||
|
|
||||||
code := 0
|
code := 0
|
||||||
|
|
||||||
if s == syscall.SIGINT {
|
if s == syscall.SIGINT {
|
||||||
@@ -78,5 +84,6 @@ func CleanupHandler(c <-chan os.Signal) {
|
|||||||
// given exit code.
|
// given exit code.
|
||||||
func Exit(code int) {
|
func Exit(code int) {
|
||||||
code = RunCleanupHandlers(code)
|
code = RunCleanupHandlers(code)
|
||||||
|
debug.Log("exiting with status code %d", code)
|
||||||
os.Exit(code)
|
os.Exit(code)
|
||||||
}
|
}
|
||||||
|
@@ -89,6 +89,7 @@ type BackupOptions struct {
|
|||||||
excludePatternOptions
|
excludePatternOptions
|
||||||
|
|
||||||
Parent string
|
Parent string
|
||||||
|
GroupBy restic.SnapshotGroupByOptions
|
||||||
Force bool
|
Force bool
|
||||||
ExcludeOtherFS bool
|
ExcludeOtherFS bool
|
||||||
ExcludeIfPresent []string
|
ExcludeIfPresent []string
|
||||||
@@ -120,7 +121,9 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdBackup)
|
cmdRoot.AddCommand(cmdBackup)
|
||||||
|
|
||||||
f := cmdBackup.Flags()
|
f := cmdBackup.Flags()
|
||||||
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)")
|
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)")
|
||||||
|
backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||||
|
f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
|
||||||
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
|
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
|
||||||
|
|
||||||
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
|
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
|
||||||
@@ -305,7 +308,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
// collectRejectByNameFuncs returns a list of all functions which may reject data
|
// collectRejectByNameFuncs returns a list of all functions which may reject data
|
||||||
// from being saved in a snapshot based on path only
|
// from being saved in a snapshot based on path only
|
||||||
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
|
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
|
||||||
// exclude restic cache
|
// exclude restic cache
|
||||||
if repo.Cache != nil {
|
if repo.Cache != nil {
|
||||||
f, err := rejectResticCache(repo)
|
f, err := rejectResticCache(repo)
|
||||||
@@ -340,7 +343,7 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
|
|||||||
|
|
||||||
// collectRejectFuncs returns a list of all functions which may reject data
|
// collectRejectFuncs returns a list of all functions which may reject data
|
||||||
// from being saved in a snapshot based on path and file info
|
// from being saved in a snapshot based on path and file info
|
||||||
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
|
func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
|
||||||
// allowed devices
|
// allowed devices
|
||||||
if opts.ExcludeOtherFS && !opts.Stdin {
|
if opts.ExcludeOtherFS && !opts.Stdin {
|
||||||
f, err := rejectByDevice(targets)
|
f, err := rejectByDevice(targets)
|
||||||
@@ -439,7 +442,18 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup
|
|||||||
if snName == "" {
|
if snName == "" {
|
||||||
snName = "latest"
|
snName = "latest"
|
||||||
}
|
}
|
||||||
sn, err := restic.FindFilteredSnapshot(ctx, repo.Backend(), repo, []string{opts.Host}, []restic.TagList{}, targets, &timeStampLimit, snName)
|
f := restic.SnapshotFilter{TimestampLimit: timeStampLimit}
|
||||||
|
if opts.GroupBy.Host {
|
||||||
|
f.Hosts = []string{opts.Host}
|
||||||
|
}
|
||||||
|
if opts.GroupBy.Path {
|
||||||
|
f.Paths = targets
|
||||||
|
}
|
||||||
|
if opts.GroupBy.Tag {
|
||||||
|
f.Tags = []restic.TagList{opts.Tags.Flatten()}
|
||||||
|
}
|
||||||
|
|
||||||
|
sn, _, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
|
||||||
// Snapshot not found is ok if no explicit parent was set
|
// Snapshot not found is ok if no explicit parent was set
|
||||||
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
|
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
|
||||||
err = nil
|
err = nil
|
||||||
@@ -492,20 +506,23 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
|
|||||||
if !gopts.JSON {
|
if !gopts.JSON {
|
||||||
progressPrinter.V("lock repository")
|
progressPrinter.V("lock repository")
|
||||||
}
|
}
|
||||||
lock, ctx, err := lockRepo(ctx, repo)
|
if !opts.DryRun {
|
||||||
defer unlockRepo(lock)
|
var lock *restic.Lock
|
||||||
if err != nil {
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
return err
|
defer unlockRepo(lock)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
|
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
|
||||||
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
|
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// rejectFuncs collect functions that can reject items from the backup based on path and file info
|
// rejectFuncs collect functions that can reject items from the backup based on path and file info
|
||||||
rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
|
rejectFuncs, err := collectRejectFuncs(opts, targets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -631,6 +648,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
|
|||||||
Time: timeStamp,
|
Time: timeStamp,
|
||||||
Hostname: opts.Host,
|
Hostname: opts.Host,
|
||||||
ParentSnapshot: parentSnapshot,
|
ParentSnapshot: parentSnapshot,
|
||||||
|
ProgramVersion: "restic " + version,
|
||||||
}
|
}
|
||||||
|
|
||||||
if !gopts.JSON {
|
if !gopts.JSON {
|
||||||
|
569
cmd/restic/cmd_backup_integration_test.go
Normal file
569
cmd/restic/cmd_backup_integration_test.go
Normal file
@@ -0,0 +1,569 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/fs"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
|
||||||
|
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
t.Logf("backing up %v in %v", target, dir)
|
||||||
|
if dir != "" {
|
||||||
|
cleanup := rtest.Chdir(t, dir)
|
||||||
|
defer cleanup()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||||
|
return runBackup(ctx, opts, gopts, term, target)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
|
||||||
|
err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
|
||||||
|
rtest.Assert(t, err == nil, "Error while backing up")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackup(t *testing.T) {
|
||||||
|
testBackup(t, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupWithFilesystemSnapshots(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
|
||||||
|
testBackup(t, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat1 := dirStats(env.repo)
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
stat2 := dirStats(env.repo)
|
||||||
|
if stat2.size > stat1.size+stat1.size/10 {
|
||||||
|
t.Error("repository size has grown by more than 10 percent")
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
// third backup, explicit incremental
|
||||||
|
opts.Parent = snapshotIDs[0].String()
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 3)
|
||||||
|
|
||||||
|
stat3 := dirStats(env.repo)
|
||||||
|
if stat3.size > stat1.size+stat1.size/10 {
|
||||||
|
t.Error("repository size has grown by more than 10 percent")
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
|
||||||
|
|
||||||
|
// restore all backups and compare
|
||||||
|
for i, snapshotID := range snapshotIDs {
|
||||||
|
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||||
|
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||||
|
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||||
|
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupWithRelativePath(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
|
||||||
|
// that the correct parent snapshot was used
|
||||||
|
latestSn, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
rtest.Assert(t, latestSn != nil, "missing latest snapshot")
|
||||||
|
rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupParentSelection(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
|
||||||
|
firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// second backup, sibling path
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
// third backup, incremental for the first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
|
||||||
|
|
||||||
|
// test that the correct parent snapshot was used
|
||||||
|
latestSn, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
rtest.Assert(t, latestSn != nil, "missing latest snapshot")
|
||||||
|
rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDryRunBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
dryOpts := BackupOptions{DryRun: true}
|
||||||
|
|
||||||
|
// dry run before first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 0)
|
||||||
|
packIDs := testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Assert(t, len(packIDs) == 0,
|
||||||
|
"expected no data, got %v", snapshotIDs)
|
||||||
|
indexIDs := testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Assert(t, len(indexIDs) == 0,
|
||||||
|
"expected no index, got %v", snapshotIDs)
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 1)
|
||||||
|
packIDs = testRunList(t, "packs", env.gopts)
|
||||||
|
indexIDs = testRunList(t, "index", env.gopts)
|
||||||
|
|
||||||
|
// dry run between backups
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDsAfter := testListSnapshots(t, env.gopts, 1)
|
||||||
|
rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
|
||||||
|
dataIDsAfter := testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Equals(t, packIDs, dataIDsAfter)
|
||||||
|
indexIDsAfter := testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Equals(t, indexIDs, indexIDsAfter)
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 2)
|
||||||
|
packIDs = testRunList(t, "packs", env.gopts)
|
||||||
|
indexIDs = testRunList(t, "index", env.gopts)
|
||||||
|
|
||||||
|
// another dry run
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDsAfter = testListSnapshots(t, env.gopts, 2)
|
||||||
|
rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
|
||||||
|
dataIDsAfter = testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Equals(t, packIDs, dataIDsAfter)
|
||||||
|
indexIDsAfter = testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Equals(t, indexIDs, indexIDsAfter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupNonExistingFile(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
|
||||||
|
_ = withRestoreGlobalOptions(func() error {
|
||||||
|
globalOptions.stderr = io.Discard
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "0", "0", "9")
|
||||||
|
dirs := []string{
|
||||||
|
filepath.Join(p, "0"),
|
||||||
|
filepath.Join(p, "1"),
|
||||||
|
filepath.Join(p, "nonexisting"),
|
||||||
|
filepath.Join(p, "5"),
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", dirs, opts, env.gopts)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupSelfHealing(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "test/test")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 5))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// remove all data packs
|
||||||
|
removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
|
||||||
|
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is also missing the data blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil,
|
||||||
|
"backup should have reported an error")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupTreeLoadError(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
p := filepath.Join(env.testdata, "test/test")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 5))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
// Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
|
||||||
|
testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
|
||||||
|
|
||||||
|
r, err := OpenRepository(context.TODO(), env.gopts)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
rtest.OK(t, r.LoadIndex(context.TODO()))
|
||||||
|
treePacks := restic.NewIDSet()
|
||||||
|
r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
|
if pb.Type == restic.TreeBlob {
|
||||||
|
treePacks.Insert(pb.PackID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// delete the subdirectory pack first
|
||||||
|
for id := range treePacks {
|
||||||
|
rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
|
||||||
|
}
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is missing the tree blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// remove all tree packs
|
||||||
|
removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is also missing the data blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "backup should have reported an error")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
var backupExcludeFilenames = []string{
|
||||||
|
"testfile1",
|
||||||
|
"foo.tar.gz",
|
||||||
|
"private/secret/passwords.txt",
|
||||||
|
"work/source/test.c",
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupExclude(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
datadir := filepath.Join(env.base, "testdata")
|
||||||
|
|
||||||
|
for _, filename := range backupExcludeFilenames {
|
||||||
|
fp := filepath.Join(datadir, filename)
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
|
||||||
|
|
||||||
|
f, err := os.Create(fp)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
fmt.Fprint(f, filename)
|
||||||
|
rtest.OK(t, f.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots := make(map[string]struct{})
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files := testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")
|
||||||
|
|
||||||
|
opts.Excludes = []string{"*.tar.gz"}
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files = testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
|
||||||
|
|
||||||
|
opts.Excludes = []string{"*.tar.gz", "private/secret"}
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files = testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "passwords.txt")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupErrors(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
|
||||||
|
// Assume failure
|
||||||
|
inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
|
||||||
|
rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
|
||||||
|
defer func() {
|
||||||
|
rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
|
||||||
|
}()
|
||||||
|
opts := BackupOptions{}
|
||||||
|
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
|
||||||
|
rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
incrementalFirstWrite = 10 * 1042 * 1024
|
||||||
|
incrementalSecondWrite = 1 * 1042 * 1024
|
||||||
|
incrementalThirdWrite = 1 * 1042 * 1024
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIncrementalBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
datadir := filepath.Join(env.base, "testdata")
|
||||||
|
testfile := filepath.Join(datadir, "testfile")
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat1 := dirStats(env.repo)
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat2 := dirStats(env.repo)
|
||||||
|
if stat2.size-stat1.size > incrementalFirstWrite {
|
||||||
|
t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat3 := dirStats(env.repo)
|
||||||
|
if stat3.size-stat2.size > incrementalFirstWrite {
|
||||||
|
t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupTags(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 0,
|
||||||
|
"expected no tags, got %v", newest.Tags)
|
||||||
|
parent := newest
|
||||||
|
|
||||||
|
opts.Tags = restic.TagLists{[]string{"NL"}}
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
|
||||||
|
"expected one NL tag, got %v", newest.Tags)
|
||||||
|
// Tagged backup should have untagged backup as parent.
|
||||||
|
rtest.Assert(t, parent.ID.Equal(*newest.Parent),
|
||||||
|
"expected parent to be %v, got %v", parent.ID, newest.Parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupProgramVersion(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
newest, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
resticVersion := "restic " + version
|
||||||
|
rtest.Assert(t, newest.ProgramVersion == resticVersion,
|
||||||
|
"expected %v, got %v", resticVersion, newest.ProgramVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestQuietBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
env.gopts.Quiet = false
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
env.gopts.Quiet = true
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHardLink(t *testing.T) {
|
||||||
|
// this test assumes a test set with a single directory containing hard linked files
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("testdata", "test.hl.tar.gz")
|
||||||
|
fd, err := os.Open(datafile)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
t.Skipf("unable to find data file %q, skipping", datafile)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rtest.OK(t, err)
|
||||||
|
rtest.OK(t, fd.Close())
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
rtest.SetupTarTestFixture(t, env.testdata, datafile)
|
||||||
|
|
||||||
|
linkTests := createFileSetPerHardlink(env.testdata)
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// restore all backups and compare
|
||||||
|
for i, snapshotID := range snapshotIDs {
|
||||||
|
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||||
|
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||||
|
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||||
|
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
|
||||||
|
|
||||||
|
linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, linksEqual(linkTests, linkResults),
|
||||||
|
"links are not equal")
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func linksEqual(source, dest map[uint64][]string) bool {
|
||||||
|
for _, vs := range source {
|
||||||
|
found := false
|
||||||
|
for kd, vd := range dest {
|
||||||
|
if linkEqual(vs, vd) {
|
||||||
|
delete(dest, kd)
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(dest) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func linkEqual(source, dest []string) bool {
|
||||||
|
// equal if sliced are equal without considering order
|
||||||
|
if source == nil && dest == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if source == nil || dest == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(source) != len(dest) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range source {
|
||||||
|
found := false
|
||||||
|
for j := range dest {
|
||||||
|
if source[i] == dest[j] {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
@@ -155,7 +155,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = tab.Write(gopts.stdout)
|
_ = tab.Write(globalOptions.stdout)
|
||||||
Printf("%d cache dirs in %s\n", len(dirs), cachedir)
|
Printf("%d cache dirs in %s\n", len(dirs), cachedir)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@@ -13,7 +13,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var cmdCat = &cobra.Command{
|
var cmdCat = &cobra.Command{
|
||||||
Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
|
Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
|
||||||
Short: "Print internal objects to stdout",
|
Short: "Print internal objects to stdout",
|
||||||
Long: `
|
Long: `
|
||||||
The "cat" command is used to print internal objects to stdout.
|
The "cat" command is used to print internal objects to stdout.
|
||||||
@@ -45,7 +45,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -55,7 +55,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
tpe := args[0]
|
tpe := args[0]
|
||||||
|
|
||||||
var id restic.ID
|
var id restic.ID
|
||||||
if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" {
|
if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" && tpe != "tree" {
|
||||||
id, err = restic.ParseID(args[1])
|
id, err = restic.ParseID(args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("unable to parse ID: %v\n", err)
|
return errors.Fatalf("unable to parse ID: %v\n", err)
|
||||||
@@ -72,7 +72,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
Println(string(buf))
|
Println(string(buf))
|
||||||
return nil
|
return nil
|
||||||
case "index":
|
case "index":
|
||||||
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
|
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -80,7 +80,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
Println(string(buf))
|
Println(string(buf))
|
||||||
return nil
|
return nil
|
||||||
case "snapshot":
|
case "snapshot":
|
||||||
sn, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
sn, _, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("could not find snapshot: %v\n", err)
|
return errors.Fatalf("could not find snapshot: %v\n", err)
|
||||||
}
|
}
|
||||||
@@ -165,6 +165,29 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
return errors.Fatal("blob not found")
|
return errors.Fatal("blob not found")
|
||||||
|
|
||||||
|
case "tree":
|
||||||
|
sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("could not find snapshot: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = repo.LoadIndex(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := repo.LoadBlob(ctx, restic.TreeBlob, *sn.Tree, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = globalOptions.stdout.Write(buf)
|
||||||
|
return err
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return errors.Fatal("invalid type")
|
return errors.Fatal("invalid type")
|
||||||
}
|
}
|
||||||
|
@@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/fs"
|
"github.com/restic/restic/internal/fs"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/ui"
|
||||||
)
|
)
|
||||||
|
|
||||||
var cmdCheck = &cobra.Command{
|
var cmdCheck = &cobra.Command{
|
||||||
@@ -65,7 +66,7 @@ func init() {
|
|||||||
// MarkDeprecated only returns an error when the flag is not found
|
// MarkDeprecated only returns an error when the flag is not found
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
|
f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use existing cache, only read uncached data from repository")
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkFlags(opts CheckOptions) error {
|
func checkFlags(opts CheckOptions) error {
|
||||||
@@ -97,7 +98,7 @@ func checkFlags(opts CheckOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
fileSize, err := parseSizeStr(opts.ReadDataSubset)
|
fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return argumentError
|
return argumentError
|
||||||
}
|
}
|
||||||
@@ -211,7 +212,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
|||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
Verbosef("create exclusive lock for repository\n")
|
Verbosef("create exclusive lock for repository\n")
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepoExclusive(ctx, repo)
|
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -245,7 +246,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
|||||||
}
|
}
|
||||||
|
|
||||||
if suggestIndexRebuild {
|
if suggestIndexRebuild {
|
||||||
Printf("Duplicate packs/old indexes are non-critical, you can run `restic rebuild-index' to correct this.\n")
|
Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
|
||||||
}
|
}
|
||||||
if mixedFound {
|
if mixedFound {
|
||||||
Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
|
Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
|
||||||
@@ -363,7 +364,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
|||||||
if repoSize == 0 {
|
if repoSize == 0 {
|
||||||
return errors.Fatal("Cannot read from a repository having size 0")
|
return errors.Fatal("Cannot read from a repository having size 0")
|
||||||
}
|
}
|
||||||
subsetSize, _ := parseSizeStr(opts.ReadDataSubset)
|
subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
|
||||||
if subsetSize > repoSize {
|
if subsetSize > repoSize {
|
||||||
subsetSize = repoSize
|
subsetSize = repoSize
|
||||||
}
|
}
|
||||||
|
34
cmd/restic/cmd_check_integration_test.go
Normal file
34
cmd/restic/cmd_check_integration_test.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunCheck(t testing.TB, gopts GlobalOptions) {
|
||||||
|
t.Helper()
|
||||||
|
output, err := testRunCheckOutput(gopts, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(output)
|
||||||
|
t.Fatalf("unexpected error: %+v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) {
|
||||||
|
t.Helper()
|
||||||
|
_, err := testRunCheckOutput(gopts, false)
|
||||||
|
rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
opts := CheckOptions{
|
||||||
|
ReadData: true,
|
||||||
|
CheckUnused: checkUnused,
|
||||||
|
}
|
||||||
|
return runCheck(context.TODO(), opts, gopts, nil)
|
||||||
|
})
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
@@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/restic/restic/internal/backend"
|
"github.com/restic/restic/internal/backend"
|
||||||
"github.com/restic/restic/internal/debug"
|
"github.com/restic/restic/internal/debug"
|
||||||
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
@@ -39,7 +40,7 @@ new destination repository using the "init" command.
|
|||||||
// CopyOptions bundles all options for the copy command.
|
// CopyOptions bundles all options for the copy command.
|
||||||
type CopyOptions struct {
|
type CopyOptions struct {
|
||||||
secondaryRepoOptions
|
secondaryRepoOptions
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
}
|
}
|
||||||
|
|
||||||
var copyOptions CopyOptions
|
var copyOptions CopyOptions
|
||||||
@@ -49,7 +50,7 @@ func init() {
|
|||||||
|
|
||||||
f := cmdCopy.Flags()
|
f := cmdCopy.Flags()
|
||||||
initSecondaryRepoOptions(f, ©Options.secondaryRepoOptions, "destination", "to copy snapshots from")
|
initSecondaryRepoOptions(f, ©Options.secondaryRepoOptions, "destination", "to copy snapshots from")
|
||||||
initMultiSnapshotFilterOptions(f, ©Options.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(f, ©Options.SnapshotFilter, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
|
func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
|
||||||
@@ -74,14 +75,14 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var srcLock *restic.Lock
|
var srcLock *restic.Lock
|
||||||
srcLock, ctx, err = lockRepo(ctx, srcRepo)
|
srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(srcLock)
|
defer unlockRepo(srcLock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dstLock, ctx, err := lockRepo(ctx, dstRepo)
|
dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(dstLock)
|
defer unlockRepo(dstLock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -108,7 +109,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
|
|||||||
}
|
}
|
||||||
|
|
||||||
dstSnapshotByOriginal := make(map[restic.ID][]*restic.Snapshot)
|
dstSnapshotByOriginal := make(map[restic.ID][]*restic.Snapshot)
|
||||||
for sn := range FindFilteredSnapshots(ctx, dstSnapshotLister, dstRepo, opts.Hosts, opts.Tags, opts.Paths, nil) {
|
for sn := range FindFilteredSnapshots(ctx, dstSnapshotLister, dstRepo, &opts.SnapshotFilter, nil) {
|
||||||
if sn.Original != nil && !sn.Original.IsNull() {
|
if sn.Original != nil && !sn.Original.IsNull() {
|
||||||
dstSnapshotByOriginal[*sn.Original] = append(dstSnapshotByOriginal[*sn.Original], sn)
|
dstSnapshotByOriginal[*sn.Original] = append(dstSnapshotByOriginal[*sn.Original], sn)
|
||||||
}
|
}
|
||||||
@@ -119,8 +120,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
|
|||||||
// remember already processed trees across all snapshots
|
// remember already processed trees across all snapshots
|
||||||
visitedTrees := restic.NewIDSet()
|
visitedTrees := restic.NewIDSet()
|
||||||
|
|
||||||
for sn := range FindFilteredSnapshots(ctx, srcSnapshotLister, srcRepo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, srcSnapshotLister, srcRepo, &opts.SnapshotFilter, args) {
|
||||||
|
|
||||||
// check whether the destination has a snapshot with the same persistent ID which has similar snapshot fields
|
// check whether the destination has a snapshot with the same persistent ID which has similar snapshot fields
|
||||||
srcOriginal := *sn.ID()
|
srcOriginal := *sn.ID()
|
||||||
if sn.Original != nil {
|
if sn.Original != nil {
|
||||||
@@ -237,5 +237,8 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
|
|||||||
bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
|
bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
|
||||||
_, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar)
|
_, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar)
|
||||||
bar.Done()
|
bar.Done()
|
||||||
return err
|
if err != nil {
|
||||||
|
return errors.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
136
cmd/restic/cmd_copy_integration_test.go
Normal file
136
cmd/restic/cmd_copy_integration_test.go
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
|
||||||
|
gopts := srcGopts
|
||||||
|
gopts.Repo = dstGopts.Repo
|
||||||
|
gopts.password = dstGopts.password
|
||||||
|
copyOpts := CopyOptions{
|
||||||
|
secondaryRepoOptions: secondaryRepoOptions{
|
||||||
|
Repo: srcGopts.Repo,
|
||||||
|
password: srcGopts.password,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopy(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
env2, cleanup2 := withTestEnvironment(t)
|
||||||
|
defer cleanup2()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
testRunInit(t, env2.gopts)
|
||||||
|
testRunCopy(t, env.gopts, env2.gopts)
|
||||||
|
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 3)
|
||||||
|
copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3)
|
||||||
|
|
||||||
|
// Check that the copies size seems reasonable
|
||||||
|
stat := dirStats(env.repo)
|
||||||
|
stat2 := dirStats(env2.repo)
|
||||||
|
sizeDiff := int64(stat.size) - int64(stat2.size)
|
||||||
|
if sizeDiff < 0 {
|
||||||
|
sizeDiff = -sizeDiff
|
||||||
|
}
|
||||||
|
rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
|
||||||
|
stat.size, stat2.size)
|
||||||
|
|
||||||
|
// Check integrity of the copy
|
||||||
|
testRunCheck(t, env2.gopts)
|
||||||
|
|
||||||
|
// Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
|
||||||
|
origRestores := make(map[string]struct{})
|
||||||
|
for i, snapshotID := range snapshotIDs {
|
||||||
|
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||||
|
origRestores[restoredir] = struct{}{}
|
||||||
|
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||||
|
}
|
||||||
|
for i, snapshotID := range copiedSnapshotIDs {
|
||||||
|
restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
|
||||||
|
testRunRestore(t, env2.gopts, restoredir, snapshotID)
|
||||||
|
foundMatch := false
|
||||||
|
for cmpdir := range origRestores {
|
||||||
|
diff := directoriesContentsDiff(restoredir, cmpdir)
|
||||||
|
if diff == "" {
|
||||||
|
delete(origRestores, cmpdir)
|
||||||
|
foundMatch = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopyIncremental(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
env2, cleanup2 := withTestEnvironment(t)
|
||||||
|
defer cleanup2()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
testRunInit(t, env2.gopts)
|
||||||
|
testRunCopy(t, env.gopts, env2.gopts)
|
||||||
|
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
testListSnapshots(t, env2.gopts, 2)
|
||||||
|
|
||||||
|
// Check that the copies size seems reasonable
|
||||||
|
testRunCheck(t, env2.gopts)
|
||||||
|
|
||||||
|
// check that no snapshots are copied, as there are no new ones
|
||||||
|
testRunCopy(t, env.gopts, env2.gopts)
|
||||||
|
testRunCheck(t, env2.gopts)
|
||||||
|
testListSnapshots(t, env2.gopts, 2)
|
||||||
|
|
||||||
|
// check that only new snapshots are copied
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
|
||||||
|
testRunCopy(t, env.gopts, env2.gopts)
|
||||||
|
testRunCheck(t, env2.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 3)
|
||||||
|
testListSnapshots(t, env2.gopts, 3)
|
||||||
|
|
||||||
|
// also test the reverse direction
|
||||||
|
testRunCopy(t, env2.gopts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopyUnstableJSON(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
env2, cleanup2 := withTestEnvironment(t)
|
||||||
|
defer cleanup2()
|
||||||
|
|
||||||
|
// contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
|
||||||
|
datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
testRunInit(t, env2.gopts)
|
||||||
|
testRunCopy(t, env.gopts, env2.gopts)
|
||||||
|
testRunCheck(t, env2.gopts)
|
||||||
|
testListSnapshots(t, env2.gopts, 1)
|
||||||
|
}
|
@@ -156,7 +156,7 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -167,20 +167,20 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
|
|||||||
|
|
||||||
switch tpe {
|
switch tpe {
|
||||||
case "indexes":
|
case "indexes":
|
||||||
return dumpIndexes(ctx, repo, gopts.stdout)
|
return dumpIndexes(ctx, repo, globalOptions.stdout)
|
||||||
case "snapshots":
|
case "snapshots":
|
||||||
return debugPrintSnapshots(ctx, repo, gopts.stdout)
|
return debugPrintSnapshots(ctx, repo, globalOptions.stdout)
|
||||||
case "packs":
|
case "packs":
|
||||||
return printPacks(ctx, repo, gopts.stdout)
|
return printPacks(ctx, repo, globalOptions.stdout)
|
||||||
case "all":
|
case "all":
|
||||||
Printf("snapshots:\n")
|
Printf("snapshots:\n")
|
||||||
err := debugPrintSnapshots(ctx, repo, gopts.stdout)
|
err := debugPrintSnapshots(ctx, repo, globalOptions.stdout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
Printf("\nindexes:\n")
|
Printf("\nindexes:\n")
|
||||||
err = dumpIndexes(ctx, repo, gopts.stdout)
|
err = dumpIndexes(ctx, repo, globalOptions.stdout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -462,7 +462,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@@ -54,12 +54,12 @@ func init() {
|
|||||||
f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
|
f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, error) {
|
func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, string, error) {
|
||||||
sn, err := restic.FindSnapshot(ctx, be, repo, desc)
|
sn, subfolder, err := restic.FindSnapshot(ctx, be, repo, desc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Fatal(err.Error())
|
return nil, "", errors.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
return sn, err
|
return sn, subfolder, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Comparer collects all things needed to compare two snapshots.
|
// Comparer collects all things needed to compare two snapshots.
|
||||||
@@ -334,7 +334,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -346,12 +346,12 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sn1, err := loadSnapshot(ctx, be, repo, args[0])
|
sn1, subfolder1, err := loadSnapshot(ctx, be, repo, args[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sn2, err := loadSnapshot(ctx, be, repo, args[1])
|
sn2, subfolder2, err := loadSnapshot(ctx, be, repo, args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -372,6 +372,16 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
|
|||||||
return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str())
|
return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sn1.Tree, err = restic.FindTreeDirectory(ctx, repo, sn1.Tree, subfolder1)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sn2.Tree, err = restic.FindTreeDirectory(ctx, repo, sn2.Tree, subfolder2)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
c := &Comparer{
|
c := &Comparer{
|
||||||
repo: repo,
|
repo: repo,
|
||||||
opts: diffOptions,
|
opts: diffOptions,
|
||||||
@@ -381,7 +391,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
|
|||||||
}
|
}
|
||||||
|
|
||||||
if gopts.JSON {
|
if gopts.JSON {
|
||||||
enc := json.NewEncoder(gopts.stdout)
|
enc := json.NewEncoder(globalOptions.stdout)
|
||||||
c.printChange = func(change *Change) {
|
c.printChange = func(change *Change) {
|
||||||
err := enc.Encode(change)
|
err := enc.Encode(change)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -415,7 +425,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
|
|||||||
updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added)
|
updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added)
|
||||||
|
|
||||||
if gopts.JSON {
|
if gopts.JSON {
|
||||||
err := json.NewEncoder(gopts.stdout).Encode(stats)
|
err := json.NewEncoder(globalOptions.stdout).Encode(stats)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("JSON encode failed: %v\n", err)
|
Warnf("JSON encode failed: %v\n", err)
|
||||||
}
|
}
|
||||||
|
193
cmd/restic/cmd_diff_integration_test.go
Normal file
193
cmd/restic/cmd_diff_integration_test.go
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
opts := DiffOptions{
|
||||||
|
ShowMetadata: false,
|
||||||
|
}
|
||||||
|
return runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
|
||||||
|
})
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFile(dst string, src string) error {
|
||||||
|
srcFile, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dstFile, err := os.Create(dst)
|
||||||
|
if err != nil {
|
||||||
|
// ignore subsequent errors
|
||||||
|
_ = srcFile.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.Copy(dstFile, srcFile)
|
||||||
|
if err != nil {
|
||||||
|
// ignore subsequent errors
|
||||||
|
_ = srcFile.Close()
|
||||||
|
_ = dstFile.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = srcFile.Close()
|
||||||
|
if err != nil {
|
||||||
|
// ignore subsequent errors
|
||||||
|
_ = dstFile.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = dstFile.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var diffOutputRegexPatterns = []string{
|
||||||
|
"-.+modfile",
|
||||||
|
"M.+modfile1",
|
||||||
|
"\\+.+modfile2",
|
||||||
|
"\\+.+modfile3",
|
||||||
|
"\\+.+modfile4",
|
||||||
|
"-.+submoddir",
|
||||||
|
"-.+submoddir.subsubmoddir",
|
||||||
|
"\\+.+submoddir2",
|
||||||
|
"\\+.+submoddir2.subsubmoddir",
|
||||||
|
"Files: +2 new, +1 removed, +1 changed",
|
||||||
|
"Dirs: +3 new, +2 removed",
|
||||||
|
"Data Blobs: +2 new, +1 removed",
|
||||||
|
"Added: +7[0-9]{2}\\.[0-9]{3} KiB",
|
||||||
|
"Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
datadir := filepath.Join(env.base, "testdata")
|
||||||
|
testdir := filepath.Join(datadir, "testdir")
|
||||||
|
subtestdir := filepath.Join(testdir, "subtestdir")
|
||||||
|
testfile := filepath.Join(testdir, "testfile")
|
||||||
|
|
||||||
|
rtest.OK(t, os.Mkdir(testdir, 0755))
|
||||||
|
rtest.OK(t, os.Mkdir(subtestdir, 0755))
|
||||||
|
rtest.OK(t, appendRandomData(testfile, 256*1024))
|
||||||
|
|
||||||
|
moddir := filepath.Join(datadir, "moddir")
|
||||||
|
submoddir := filepath.Join(moddir, "submoddir")
|
||||||
|
subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
|
||||||
|
modfile := filepath.Join(moddir, "modfile")
|
||||||
|
rtest.OK(t, os.Mkdir(moddir, 0755))
|
||||||
|
rtest.OK(t, os.Mkdir(submoddir, 0755))
|
||||||
|
rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
|
||||||
|
rtest.OK(t, copyFile(modfile, testfile))
|
||||||
|
rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
|
||||||
|
|
||||||
|
snapshots := make(map[string]struct{})
|
||||||
|
opts := BackupOptions{}
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
|
||||||
|
rtest.OK(t, os.Rename(modfile, modfile+"3"))
|
||||||
|
rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
|
||||||
|
rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
|
||||||
|
rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
|
||||||
|
rtest.OK(t, os.Mkdir(modfile+"4", 0755))
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
_, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
|
||||||
|
return env, cleanup, firstSnapshotID, secondSnapshotID
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDiff(t *testing.T) {
|
||||||
|
env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
// quiet suppresses the diff output except for the summary
|
||||||
|
env.gopts.Quiet = false
|
||||||
|
_, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
|
||||||
|
rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
|
||||||
|
|
||||||
|
out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
for _, pattern := range diffOutputRegexPatterns {
|
||||||
|
r, err := regexp.Compile(pattern)
|
||||||
|
rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
|
||||||
|
rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
// check quiet output
|
||||||
|
env.gopts.Quiet = true
|
||||||
|
outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
|
||||||
|
}
|
||||||
|
|
||||||
|
type typeSniffer struct {
|
||||||
|
MessageType string `json:"message_type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDiffJSON(t *testing.T) {
|
||||||
|
env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
// quiet suppresses the diff output except for the summary
|
||||||
|
env.gopts.Quiet = false
|
||||||
|
env.gopts.JSON = true
|
||||||
|
out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
var stat DiffStatsContainer
|
||||||
|
var changes int
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(out))
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
var sniffer typeSniffer
|
||||||
|
rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
|
||||||
|
switch sniffer.MessageType {
|
||||||
|
case "change":
|
||||||
|
changes++
|
||||||
|
case "statistics":
|
||||||
|
rtest.OK(t, json.Unmarshal([]byte(line), &stat))
|
||||||
|
default:
|
||||||
|
t.Fatalf("unexpected message type %v", sniffer.MessageType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rtest.Equals(t, 9, changes)
|
||||||
|
rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
|
||||||
|
stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
|
||||||
|
stat.ChangedFiles == 1, "unexpected statistics")
|
||||||
|
|
||||||
|
// check quiet output
|
||||||
|
env.gopts.Quiet = true
|
||||||
|
outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
stat = DiffStatsContainer{}
|
||||||
|
rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
|
||||||
|
rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
|
||||||
|
stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
|
||||||
|
stat.ChangedFiles == 1, "unexpected statistics")
|
||||||
|
rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
|
||||||
|
}
|
@@ -40,7 +40,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
|
|
||||||
// DumpOptions collects all options for the dump command.
|
// DumpOptions collects all options for the dump command.
|
||||||
type DumpOptions struct {
|
type DumpOptions struct {
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
Archive string
|
Archive string
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -50,7 +50,7 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdDump)
|
cmdRoot.AddCommand(cmdDump)
|
||||||
|
|
||||||
flags := cmdDump.Flags()
|
flags := cmdDump.Flags()
|
||||||
initSingleSnapshotFilterOptions(flags, &dumpOptions.snapshotFilterOptions)
|
initSingleSnapshotFilter(flags, &dumpOptions.SnapshotFilter)
|
||||||
flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"")
|
flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -132,14 +132,18 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sn, err := restic.FindFilteredSnapshot(ctx, repo.Backend(), repo, opts.Paths, opts.Tags, opts.Hosts, nil, snapshotIDString)
|
sn, subfolder, err := (&restic.SnapshotFilter{
|
||||||
|
Hosts: opts.Hosts,
|
||||||
|
Paths: opts.Paths,
|
||||||
|
Tags: opts.Tags,
|
||||||
|
}).FindLatest(ctx, repo.Backend(), repo, snapshotIDString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("failed to find snapshot: %v", err)
|
return errors.Fatalf("failed to find snapshot: %v", err)
|
||||||
}
|
}
|
||||||
@@ -149,6 +153,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
tree, err := restic.LoadTree(ctx, repo, *sn.Tree)
|
tree, err := restic.LoadTree(ctx, repo, *sn.Tree)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
|
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
|
||||||
|
@@ -51,7 +51,8 @@ type FindOptions struct {
|
|||||||
PackID, ShowPackID bool
|
PackID, ShowPackID bool
|
||||||
CaseInsensitive bool
|
CaseInsensitive bool
|
||||||
ListLong bool
|
ListLong bool
|
||||||
snapshotFilterOptions
|
HumanReadable bool
|
||||||
|
restic.SnapshotFilter
|
||||||
}
|
}
|
||||||
|
|
||||||
var findOptions FindOptions
|
var findOptions FindOptions
|
||||||
@@ -69,8 +70,9 @@ func init() {
|
|||||||
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
|
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
|
||||||
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
|
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
|
||||||
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||||
|
f.BoolVar(&findOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
|
||||||
|
|
||||||
initMultiSnapshotFilterOptions(f, &findOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(f, &findOptions.SnapshotFilter, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
type findPattern struct {
|
type findPattern struct {
|
||||||
@@ -104,12 +106,13 @@ func parseTime(str string) (time.Time, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type statefulOutput struct {
|
type statefulOutput struct {
|
||||||
ListLong bool
|
ListLong bool
|
||||||
JSON bool
|
HumanReadable bool
|
||||||
inuse bool
|
JSON bool
|
||||||
newsn *restic.Snapshot
|
inuse bool
|
||||||
oldsn *restic.Snapshot
|
newsn *restic.Snapshot
|
||||||
hits int
|
oldsn *restic.Snapshot
|
||||||
|
hits int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
|
func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
|
||||||
@@ -164,7 +167,7 @@ func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) {
|
|||||||
s.oldsn = s.newsn
|
s.oldsn = s.newsn
|
||||||
Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
|
Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
|
||||||
}
|
}
|
||||||
Println(formatNode(path, node, s.ListLong))
|
Println(formatNode(path, node, s.ListLong, s.HumanReadable))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
|
func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
|
||||||
@@ -501,7 +504,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
|
|||||||
return packIDs
|
return packIDs
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
|
func (f *Finder) findObjectPack(id string, t restic.BlobType) {
|
||||||
idx := f.repo.Index()
|
idx := f.repo.Index()
|
||||||
|
|
||||||
rid, err := restic.ParseID(id)
|
rid, err := restic.ParseID(id)
|
||||||
@@ -524,13 +527,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Finder) findObjectsPacks(ctx context.Context) {
|
func (f *Finder) findObjectsPacks() {
|
||||||
for i := range f.blobIDs {
|
for i := range f.blobIDs {
|
||||||
f.findObjectPack(ctx, i, restic.DataBlob)
|
f.findObjectPack(i, restic.DataBlob)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range f.treeIDs {
|
for i := range f.treeIDs {
|
||||||
f.findObjectPack(ctx, i, restic.TreeBlob)
|
f.findObjectPack(i, restic.TreeBlob)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -575,7 +578,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -594,7 +597,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
|
|||||||
f := &Finder{
|
f := &Finder{
|
||||||
repo: repo,
|
repo: repo,
|
||||||
pat: pat,
|
pat: pat,
|
||||||
out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
|
out: statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
|
||||||
ignoreTrees: restic.NewIDSet(),
|
ignoreTrees: restic.NewIDSet(),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -618,7 +621,16 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, opts.Hosts, opts.Tags, opts.Paths, opts.Snapshots) {
|
var filteredSnapshots []*restic.Snapshot
|
||||||
|
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
|
||||||
|
filteredSnapshots = append(filteredSnapshots, sn)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(filteredSnapshots, func(i, j int) bool {
|
||||||
|
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, sn := range filteredSnapshots {
|
||||||
if f.blobIDs != nil || f.treeIDs != nil {
|
if f.blobIDs != nil || f.treeIDs != nil {
|
||||||
if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
|
if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
|
||||||
return err
|
return err
|
||||||
@@ -632,7 +644,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
|
|||||||
f.out.Finish()
|
f.out.Finish()
|
||||||
|
|
||||||
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
|
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
|
||||||
f.findObjectsPacks(ctx)
|
f.findObjectsPacks()
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
87
cmd/restic/cmd_find_integration_test.go
Normal file
87
cmd/restic/cmd_find_integration_test.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
gopts.JSON = wantJSON
|
||||||
|
|
||||||
|
opts := FindOptions{}
|
||||||
|
return runFind(context.TODO(), opts, gopts, []string{pattern})
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFind(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
results := testRunFind(t, false, env.gopts, "unexistingfile")
|
||||||
|
rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
|
||||||
|
|
||||||
|
results = testRunFind(t, false, env.gopts, "testfile")
|
||||||
|
lines := strings.Split(string(results), "\n")
|
||||||
|
rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
|
||||||
|
|
||||||
|
results = testRunFind(t, false, env.gopts, "testfile*")
|
||||||
|
lines = strings.Split(string(results), "\n")
|
||||||
|
rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
|
||||||
|
}
|
||||||
|
|
||||||
|
type testMatch struct {
|
||||||
|
Path string `json:"path,omitempty"`
|
||||||
|
Permissions string `json:"permissions,omitempty"`
|
||||||
|
Size uint64 `json:"size,omitempty"`
|
||||||
|
Date time.Time `json:"date,omitempty"`
|
||||||
|
UID uint32 `json:"uid,omitempty"`
|
||||||
|
GID uint32 `json:"gid,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type testMatches struct {
|
||||||
|
Hits int `json:"hits,omitempty"`
|
||||||
|
SnapshotID string `json:"snapshot,omitempty"`
|
||||||
|
Matches []testMatch `json:"matches,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindJSON(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
results := testRunFind(t, true, env.gopts, "unexistingfile")
|
||||||
|
matches := []testMatches{}
|
||||||
|
rtest.OK(t, json.Unmarshal(results, &matches))
|
||||||
|
rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
|
||||||
|
|
||||||
|
results = testRunFind(t, true, env.gopts, "testfile")
|
||||||
|
rtest.OK(t, json.Unmarshal(results, &matches))
|
||||||
|
rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
|
||||||
|
rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
|
||||||
|
rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
|
||||||
|
|
||||||
|
results = testRunFind(t, true, env.gopts, "testfile*")
|
||||||
|
rtest.OK(t, json.Unmarshal(results, &matches))
|
||||||
|
rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
|
||||||
|
rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
|
||||||
|
rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
|
||||||
|
}
|
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
@@ -36,14 +37,49 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ForgetPolicyCount int
|
||||||
|
|
||||||
|
var ErrNegativePolicyCount = errors.New("negative values not allowed, use 'unlimited' instead")
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) Set(s string) error {
|
||||||
|
switch s {
|
||||||
|
case "unlimited":
|
||||||
|
*c = -1
|
||||||
|
default:
|
||||||
|
val, err := strconv.ParseInt(s, 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if val < 0 {
|
||||||
|
return ErrNegativePolicyCount
|
||||||
|
}
|
||||||
|
*c = ForgetPolicyCount(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) String() string {
|
||||||
|
switch *c {
|
||||||
|
case -1:
|
||||||
|
return "unlimited"
|
||||||
|
default:
|
||||||
|
return strconv.FormatInt(int64(*c), 10)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) Type() string {
|
||||||
|
return "n"
|
||||||
|
}
|
||||||
|
|
||||||
// ForgetOptions collects all options for the forget command.
|
// ForgetOptions collects all options for the forget command.
|
||||||
type ForgetOptions struct {
|
type ForgetOptions struct {
|
||||||
Last int
|
Last ForgetPolicyCount
|
||||||
Hourly int
|
Hourly ForgetPolicyCount
|
||||||
Daily int
|
Daily ForgetPolicyCount
|
||||||
Weekly int
|
Weekly ForgetPolicyCount
|
||||||
Monthly int
|
Monthly ForgetPolicyCount
|
||||||
Yearly int
|
Yearly ForgetPolicyCount
|
||||||
Within restic.Duration
|
Within restic.Duration
|
||||||
WithinHourly restic.Duration
|
WithinHourly restic.Duration
|
||||||
WithinDaily restic.Duration
|
WithinDaily restic.Duration
|
||||||
@@ -52,11 +88,11 @@ type ForgetOptions struct {
|
|||||||
WithinYearly restic.Duration
|
WithinYearly restic.Duration
|
||||||
KeepTags restic.TagLists
|
KeepTags restic.TagLists
|
||||||
|
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
Compact bool
|
Compact bool
|
||||||
|
|
||||||
// Grouping
|
// Grouping
|
||||||
GroupBy string
|
GroupBy restic.SnapshotGroupByOptions
|
||||||
DryRun bool
|
DryRun bool
|
||||||
Prune bool
|
Prune bool
|
||||||
}
|
}
|
||||||
@@ -67,12 +103,12 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdForget)
|
cmdRoot.AddCommand(cmdForget)
|
||||||
|
|
||||||
f := cmdForget.Flags()
|
f := cmdForget.Flags()
|
||||||
f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
|
f.VarP(&forgetOptions.Last, "keep-last", "l", "keep the last `n` snapshots (use 'unlimited' to keep all snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
|
f.VarP(&forgetOptions.Hourly, "keep-hourly", "H", "keep the last `n` hourly snapshots (use 'unlimited' to keep all hourly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
|
f.VarP(&forgetOptions.Daily, "keep-daily", "d", "keep the last `n` daily snapshots (use 'unlimited' to keep all daily snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
|
f.VarP(&forgetOptions.Weekly, "keep-weekly", "w", "keep the last `n` weekly snapshots (use 'unlimited' to keep all weekly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
|
f.VarP(&forgetOptions.Monthly, "keep-monthly", "m", "keep the last `n` monthly snapshots (use 'unlimited' to keep all monthly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
|
f.VarP(&forgetOptions.Yearly, "keep-yearly", "y", "keep the last `n` yearly snapshots (use 'unlimited' to keep all yearly snapshots)")
|
||||||
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
@@ -81,7 +117,7 @@ func init() {
|
|||||||
f.VarP(&forgetOptions.WithinYearly, "keep-within-yearly", "", "keep yearly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.WithinYearly, "keep-within-yearly", "", "keep yearly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)")
|
f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)")
|
||||||
|
|
||||||
initMultiSnapshotFilterOptions(f, &forgetOptions.snapshotFilterOptions, false)
|
initMultiSnapshotFilter(f, &forgetOptions.SnapshotFilter, false)
|
||||||
f.StringArrayVar(&forgetOptions.Hosts, "hostname", nil, "only consider snapshots with the given `hostname` (can be specified multiple times)")
|
f.StringArrayVar(&forgetOptions.Hosts, "hostname", nil, "only consider snapshots with the given `hostname` (can be specified multiple times)")
|
||||||
err := f.MarkDeprecated("hostname", "use --host")
|
err := f.MarkDeprecated("hostname", "use --host")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -90,8 +126,8 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact output format")
|
f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact output format")
|
||||||
|
forgetOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||||
f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
|
f.VarP(&forgetOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
|
||||||
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
|
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
|
||||||
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
|
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
|
||||||
|
|
||||||
@@ -99,8 +135,29 @@ func init() {
|
|||||||
addPruneOptions(cmdForget)
|
addPruneOptions(cmdForget)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func verifyForgetOptions(opts *ForgetOptions) error {
|
||||||
|
if opts.Last < -1 || opts.Hourly < -1 || opts.Daily < -1 || opts.Weekly < -1 ||
|
||||||
|
opts.Monthly < -1 || opts.Yearly < -1 {
|
||||||
|
return errors.Fatal("negative values other than -1 are not allowed for --keep-*")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range []restic.Duration{opts.Within, opts.WithinHourly, opts.WithinDaily,
|
||||||
|
opts.WithinMonthly, opts.WithinWeekly, opts.WithinYearly} {
|
||||||
|
if d.Hours < 0 || d.Days < 0 || d.Months < 0 || d.Years < 0 {
|
||||||
|
return errors.Fatal("durations containing negative values are not allowed for --keep-within*")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||||
err := verifyPruneOptions(&pruneOptions)
|
err := verifyForgetOptions(&opts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = verifyPruneOptions(&pruneOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -116,7 +173,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
|
|||||||
|
|
||||||
if !opts.DryRun || !gopts.NoLock {
|
if !opts.DryRun || !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepoExclusive(ctx, repo)
|
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -126,7 +183,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
|
|||||||
var snapshots restic.Snapshots
|
var snapshots restic.Snapshots
|
||||||
removeSnIDs := restic.NewIDSet()
|
removeSnIDs := restic.NewIDSet()
|
||||||
|
|
||||||
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
|
||||||
snapshots = append(snapshots, sn)
|
snapshots = append(snapshots, sn)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,12 +201,12 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
|
|||||||
}
|
}
|
||||||
|
|
||||||
policy := restic.ExpirePolicy{
|
policy := restic.ExpirePolicy{
|
||||||
Last: opts.Last,
|
Last: int(opts.Last),
|
||||||
Hourly: opts.Hourly,
|
Hourly: int(opts.Hourly),
|
||||||
Daily: opts.Daily,
|
Daily: int(opts.Daily),
|
||||||
Weekly: opts.Weekly,
|
Weekly: int(opts.Weekly),
|
||||||
Monthly: opts.Monthly,
|
Monthly: int(opts.Monthly),
|
||||||
Yearly: opts.Yearly,
|
Yearly: int(opts.Yearly),
|
||||||
Within: opts.Within,
|
Within: opts.Within,
|
||||||
WithinHourly: opts.WithinHourly,
|
WithinHourly: opts.WithinHourly,
|
||||||
WithinDaily: opts.WithinDaily,
|
WithinDaily: opts.WithinDaily,
|
||||||
@@ -172,7 +229,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
|
|||||||
|
|
||||||
for k, snapshotGroup := range snapshotGroups {
|
for k, snapshotGroup := range snapshotGroups {
|
||||||
if gopts.Verbose >= 1 && !gopts.JSON {
|
if gopts.Verbose >= 1 && !gopts.JSON {
|
||||||
err = PrintSnapshotGroupHeader(gopts.stdout, k)
|
err = PrintSnapshotGroupHeader(globalOptions.stdout, k)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -229,7 +286,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
|
|||||||
}
|
}
|
||||||
|
|
||||||
if gopts.JSON && len(jsonGroups) > 0 {
|
if gopts.JSON && len(jsonGroups) > 0 {
|
||||||
err = printJSONForget(gopts.stdout, jsonGroups)
|
err = printJSONForget(globalOptions.stdout, jsonGroups)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
13
cmd/restic/cmd_forget_integration_test.go
Normal file
13
cmd/restic/cmd_forget_integration_test.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||||
|
opts := ForgetOptions{}
|
||||||
|
rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
|
||||||
|
}
|
94
cmd/restic/cmd_forget_test.go
Normal file
94
cmd/restic/cmd_forget_test.go
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestForgetPolicyValues(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
input string
|
||||||
|
value ForgetPolicyCount
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{"0", ForgetPolicyCount(0), ""},
|
||||||
|
{"1", ForgetPolicyCount(1), ""},
|
||||||
|
{"unlimited", ForgetPolicyCount(-1), ""},
|
||||||
|
{"", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"\": invalid syntax"},
|
||||||
|
{"-1", ForgetPolicyCount(0), ErrNegativePolicyCount.Error()},
|
||||||
|
{"abc", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"abc\": invalid syntax"},
|
||||||
|
}
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run("", func(t *testing.T) {
|
||||||
|
var count ForgetPolicyCount
|
||||||
|
err := count.Set(testCase.input)
|
||||||
|
|
||||||
|
if testCase.err != "" {
|
||||||
|
rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
|
||||||
|
rtest.Equals(t, testCase.err, err.Error())
|
||||||
|
} else {
|
||||||
|
rtest.Assert(t, err == nil, "expected no error for input %+v, got %v", testCase.input, err)
|
||||||
|
rtest.Equals(t, testCase.value, count)
|
||||||
|
rtest.Equals(t, testCase.input, count.String())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestForgetOptionValues(t *testing.T) {
|
||||||
|
const negValErrorMsg = "Fatal: negative values other than -1 are not allowed for --keep-*"
|
||||||
|
const negDurationValErrorMsg = "Fatal: durations containing negative values are not allowed for --keep-within*"
|
||||||
|
testCases := []struct {
|
||||||
|
input ForgetOptions
|
||||||
|
errorMsg string
|
||||||
|
}{
|
||||||
|
{ForgetOptions{Last: 1}, ""},
|
||||||
|
{ForgetOptions{Hourly: 1}, ""},
|
||||||
|
{ForgetOptions{Daily: 1}, ""},
|
||||||
|
{ForgetOptions{Weekly: 1}, ""},
|
||||||
|
{ForgetOptions{Monthly: 1}, ""},
|
||||||
|
{ForgetOptions{Yearly: 1}, ""},
|
||||||
|
{ForgetOptions{Last: 0}, ""},
|
||||||
|
{ForgetOptions{Hourly: 0}, ""},
|
||||||
|
{ForgetOptions{Daily: 0}, ""},
|
||||||
|
{ForgetOptions{Weekly: 0}, ""},
|
||||||
|
{ForgetOptions{Monthly: 0}, ""},
|
||||||
|
{ForgetOptions{Yearly: 0}, ""},
|
||||||
|
{ForgetOptions{Last: -1}, ""},
|
||||||
|
{ForgetOptions{Hourly: -1}, ""},
|
||||||
|
{ForgetOptions{Daily: -1}, ""},
|
||||||
|
{ForgetOptions{Weekly: -1}, ""},
|
||||||
|
{ForgetOptions{Monthly: -1}, ""},
|
||||||
|
{ForgetOptions{Yearly: -1}, ""},
|
||||||
|
{ForgetOptions{Last: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Hourly: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Daily: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Weekly: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Monthly: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Yearly: -2}, negValErrorMsg},
|
||||||
|
{ForgetOptions{Within: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
|
||||||
|
{ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
|
||||||
|
{ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
|
||||||
|
{ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
|
||||||
|
{ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
|
||||||
|
{ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
|
||||||
|
{ForgetOptions{Within: restic.ParseDurationOrPanic("-1y2m3d3h")}, negDurationValErrorMsg},
|
||||||
|
{ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y-2m3d3h")}, negDurationValErrorMsg},
|
||||||
|
{ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m-3d3h")}, negDurationValErrorMsg},
|
||||||
|
{ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d-3h")}, negDurationValErrorMsg},
|
||||||
|
{ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("-2y4m6d8h")}, negDurationValErrorMsg},
|
||||||
|
{ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y-4m6d8h")}, negDurationValErrorMsg},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
err := verifyForgetOptions(&testCase.input)
|
||||||
|
if testCase.errorMsg != "" {
|
||||||
|
rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
|
||||||
|
rtest.Equals(t, testCase.errorMsg, err.Error())
|
||||||
|
} else {
|
||||||
|
rtest.Assert(t, err == nil, "expected no error for input %+v", testCase.input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -63,26 +63,38 @@ func writeManpages(dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func writeBashCompletion(file string) error {
|
func writeBashCompletion(file string) error {
|
||||||
Verbosef("writing bash completion file to %v\n", file)
|
if stdoutIsTerminal() {
|
||||||
|
Verbosef("writing bash completion file to %v\n", file)
|
||||||
|
}
|
||||||
return cmdRoot.GenBashCompletionFile(file)
|
return cmdRoot.GenBashCompletionFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeFishCompletion(file string) error {
|
func writeFishCompletion(file string) error {
|
||||||
Verbosef("writing fish completion file to %v\n", file)
|
if stdoutIsTerminal() {
|
||||||
|
Verbosef("writing fish completion file to %v\n", file)
|
||||||
|
}
|
||||||
return cmdRoot.GenFishCompletionFile(file, true)
|
return cmdRoot.GenFishCompletionFile(file, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeZSHCompletion(file string) error {
|
func writeZSHCompletion(file string) error {
|
||||||
Verbosef("writing zsh completion file to %v\n", file)
|
if stdoutIsTerminal() {
|
||||||
|
Verbosef("writing zsh completion file to %v\n", file)
|
||||||
|
}
|
||||||
return cmdRoot.GenZshCompletionFile(file)
|
return cmdRoot.GenZshCompletionFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
func writePowerShellCompletion(file string) error {
|
func writePowerShellCompletion(file string) error {
|
||||||
Verbosef("writing powershell completion file to %v\n", file)
|
if stdoutIsTerminal() {
|
||||||
|
Verbosef("writing powershell completion file to %v\n", file)
|
||||||
|
}
|
||||||
return cmdRoot.GenPowerShellCompletionFile(file)
|
return cmdRoot.GenPowerShellCompletionFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runGenerate(cmd *cobra.Command, args []string) error {
|
func runGenerate(_ *cobra.Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
|
||||||
|
}
|
||||||
|
|
||||||
if genOpts.ManDir != "" {
|
if genOpts.ManDir != "" {
|
||||||
err := writeManpages(genOpts.ManDir)
|
err := writeManpages(genOpts.ManDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -50,6 +50,10 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
|
func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
return errors.Fatal("the init command expects no arguments, only options - please see `restic help init` for usage and flags")
|
||||||
|
}
|
||||||
|
|
||||||
var version uint
|
var version uint
|
||||||
if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
|
if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
|
||||||
version = restic.MaxRepoVersion
|
version = restic.MaxRepoVersion
|
||||||
@@ -83,9 +87,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
be, err := create(ctx, repo, gopts.extended)
|
be, err := create(ctx, repo, gopts, gopts.extended)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
|
return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s, err := repository.New(be, repository.Options{
|
s, err := repository.New(be, repository.Options{
|
||||||
@@ -93,16 +97,21 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||||||
PackSize: gopts.PackSize * 1024 * 1024,
|
PackSize: gopts.PackSize * 1024 * 1024,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
|
err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
|
return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !gopts.JSON {
|
if !gopts.JSON {
|
||||||
Verbosef("created restic repository %v at %s\n", s.Config().ID[:10], location.StripPassword(gopts.Repo))
|
Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.backends, gopts.Repo))
|
||||||
|
if opts.CopyChunkerParameters && chunkerPolynomial != nil {
|
||||||
|
Verbosef(" with chunker parameters copied from secondary repository\n")
|
||||||
|
} else {
|
||||||
|
Verbosef("\n")
|
||||||
|
}
|
||||||
Verbosef("\n")
|
Verbosef("\n")
|
||||||
Verbosef("Please note that knowledge of your password is required to access\n")
|
Verbosef("Please note that knowledge of your password is required to access\n")
|
||||||
Verbosef("the repository. Losing your password means that your data is\n")
|
Verbosef("the repository. Losing your password means that your data is\n")
|
||||||
@@ -112,9 +121,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||||||
status := initSuccess{
|
status := initSuccess{
|
||||||
MessageType: "initialized",
|
MessageType: "initialized",
|
||||||
ID: s.Config().ID,
|
ID: s.Config().ID,
|
||||||
Repository: location.StripPassword(gopts.Repo),
|
Repository: location.StripPassword(gopts.backends, gopts.Repo),
|
||||||
}
|
}
|
||||||
return json.NewEncoder(gopts.stdout).Encode(status)
|
return json.NewEncoder(globalOptions.stdout).Encode(status)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
49
cmd/restic/cmd_init_integration_test.go
Normal file
49
cmd/restic/cmd_init_integration_test.go
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunInit(t testing.TB, opts GlobalOptions) {
|
||||||
|
repository.TestUseLowSecurityKDFParameters(t)
|
||||||
|
restic.TestDisableCheckPolynomial(t)
|
||||||
|
restic.TestSetLockTimeout(t, 0)
|
||||||
|
|
||||||
|
rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
|
||||||
|
t.Logf("repository initialized at %v", opts.Repo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitCopyChunkerParams(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
env2, cleanup2 := withTestEnvironment(t)
|
||||||
|
defer cleanup2()
|
||||||
|
|
||||||
|
testRunInit(t, env2.gopts)
|
||||||
|
|
||||||
|
initOpts := InitOptions{
|
||||||
|
secondaryRepoOptions: secondaryRepoOptions{
|
||||||
|
Repo: env2.gopts.Repo,
|
||||||
|
password: env2.gopts.password,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")
|
||||||
|
|
||||||
|
initOpts.CopyChunkerParameters = true
|
||||||
|
rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))
|
||||||
|
|
||||||
|
repo, err := OpenRepository(context.TODO(), env.gopts)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
|
||||||
|
"expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
|
||||||
|
otherRepo.Config().ChunkerPolynomial)
|
||||||
|
}
|
@@ -212,7 +212,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
switch args[0] {
|
switch args[0] {
|
||||||
case "list":
|
case "list":
|
||||||
lock, ctx, err := lockRepo(ctx, repo)
|
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -220,7 +220,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
return listKeys(ctx, repo, gopts)
|
return listKeys(ctx, repo, gopts)
|
||||||
case "add":
|
case "add":
|
||||||
lock, ctx, err := lockRepo(ctx, repo)
|
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -228,7 +228,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
return addKey(ctx, repo, gopts)
|
return addKey(ctx, repo, gopts)
|
||||||
case "remove":
|
case "remove":
|
||||||
lock, ctx, err := lockRepoExclusive(ctx, repo)
|
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -241,7 +241,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
return deleteKey(ctx, repo, id)
|
return deleteKey(ctx, repo, id)
|
||||||
case "passwd":
|
case "passwd":
|
||||||
lock, ctx, err := lockRepoExclusive(ctx, repo)
|
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
145
cmd/restic/cmd_key_integration_test.go
Normal file
145
cmd/restic/cmd_key_integration_test.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
return runKey(context.TODO(), gopts, []string{"list"})
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(buf)
|
||||||
|
exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
|
||||||
|
|
||||||
|
IDs := []string{}
|
||||||
|
for scanner.Scan() {
|
||||||
|
if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
|
||||||
|
IDs = append(IDs, id[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return IDs
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
|
||||||
|
testKeyNewPassword = newPassword
|
||||||
|
defer func() {
|
||||||
|
testKeyNewPassword = ""
|
||||||
|
}()
|
||||||
|
|
||||||
|
rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
|
||||||
|
testKeyNewPassword = "john's geheimnis"
|
||||||
|
defer func() {
|
||||||
|
testKeyNewPassword = ""
|
||||||
|
keyUsername = ""
|
||||||
|
keyHostname = ""
|
||||||
|
}()
|
||||||
|
|
||||||
|
rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))
|
||||||
|
|
||||||
|
t.Log("adding key for john@example.com")
|
||||||
|
rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
|
||||||
|
|
||||||
|
repo, err := OpenRepository(context.TODO(), gopts)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Equals(t, "john", key.Username)
|
||||||
|
rtest.Equals(t, "example.com", key.Hostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
|
||||||
|
testKeyNewPassword = newPassword
|
||||||
|
defer func() {
|
||||||
|
testKeyNewPassword = ""
|
||||||
|
}()
|
||||||
|
|
||||||
|
rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
|
||||||
|
t.Logf("remove %d keys: %q\n", len(IDs), IDs)
|
||||||
|
for _, id := range IDs {
|
||||||
|
rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeyAddRemove(t *testing.T) {
|
||||||
|
passwordList := []string{
|
||||||
|
"OnnyiasyatvodsEvVodyawit",
|
||||||
|
"raicneirvOjEfEigonOmLasOd",
|
||||||
|
}
|
||||||
|
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
// must list keys more than once
|
||||||
|
env.gopts.backendTestHook = nil
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
testRunKeyPasswd(t, "geheim2", env.gopts)
|
||||||
|
env.gopts.password = "geheim2"
|
||||||
|
t.Logf("changed password to %q", env.gopts.password)
|
||||||
|
|
||||||
|
for _, newPassword := range passwordList {
|
||||||
|
testRunKeyAddNewKey(t, newPassword, env.gopts)
|
||||||
|
t.Logf("added new password %q", newPassword)
|
||||||
|
env.gopts.password = newPassword
|
||||||
|
testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
|
||||||
|
}
|
||||||
|
|
||||||
|
env.gopts.password = passwordList[len(passwordList)-1]
|
||||||
|
t.Logf("testing access with last password %q\n", env.gopts.password)
|
||||||
|
rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
testRunKeyAddNewKeyUserHost(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
type emptySaveBackend struct {
|
||||||
|
restic.Backend
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
|
||||||
|
return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeyProblems(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
|
||||||
|
return &emptySaveBackend{r}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
testKeyNewPassword = "geheim2"
|
||||||
|
defer func() {
|
||||||
|
testKeyNewPassword = ""
|
||||||
|
}()
|
||||||
|
|
||||||
|
err := runKey(context.TODO(), env.gopts, []string{"passwd"})
|
||||||
|
t.Log(err)
|
||||||
|
rtest.Assert(t, err != nil, "expected passwd change to fail")
|
||||||
|
|
||||||
|
err = runKey(context.TODO(), env.gopts, []string{"add"})
|
||||||
|
t.Log(err)
|
||||||
|
rtest.Assert(t, err != nil, "expected key adding to fail")
|
||||||
|
|
||||||
|
t.Logf("testing access with initial password %q\n", env.gopts.password)
|
||||||
|
rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
@@ -31,19 +31,19 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdList)
|
cmdRoot.AddCommand(cmdList)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args []string) error {
|
func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errors.Fatal("type not specified, usage: " + cmd.Use)
|
return errors.Fatal("type not specified, usage: " + cmd.Use)
|
||||||
}
|
}
|
||||||
|
|
||||||
repo, err := OpenRepository(ctx, opts)
|
repo, err := OpenRepository(ctx, gopts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !opts.NoLock && args[0] != "locks" {
|
if !gopts.NoLock && args[0] != "locks" {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
44
cmd/restic/cmd_list_integration_test.go
Normal file
44
cmd/restic/cmd_list_integration_test.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
return runList(context.TODO(), cmdList, opts, []string{tpe})
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
return parseIDsFromReader(t, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
|
||||||
|
t.Helper()
|
||||||
|
IDs := restic.IDs{}
|
||||||
|
sc := bufio.NewScanner(rd)
|
||||||
|
|
||||||
|
for sc.Scan() {
|
||||||
|
id, err := restic.ParseID(sc.Text())
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("parse id %v: %v", sc.Text(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
IDs = append(IDs, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
return IDs
|
||||||
|
}
|
||||||
|
|
||||||
|
func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs {
|
||||||
|
t.Helper()
|
||||||
|
snapshotIDs := testRunList(t, "snapshots", opts)
|
||||||
|
rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshot, got %v", expected, snapshotIDs)
|
||||||
|
return snapshotIDs
|
||||||
|
}
|
@@ -49,8 +49,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
// LsOptions collects all options for the ls command.
|
// LsOptions collects all options for the ls command.
|
||||||
type LsOptions struct {
|
type LsOptions struct {
|
||||||
ListLong bool
|
ListLong bool
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
Recursive bool
|
Recursive bool
|
||||||
|
HumanReadable bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var lsOptions LsOptions
|
var lsOptions LsOptions
|
||||||
@@ -59,9 +60,10 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdLs)
|
cmdRoot.AddCommand(cmdLs)
|
||||||
|
|
||||||
flags := cmdLs.Flags()
|
flags := cmdLs.Flags()
|
||||||
initSingleSnapshotFilterOptions(flags, &lsOptions.snapshotFilterOptions)
|
initSingleSnapshotFilter(flags, &lsOptions.SnapshotFilter)
|
||||||
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||||
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
|
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
|
||||||
|
flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
|
||||||
}
|
}
|
||||||
|
|
||||||
type lsSnapshot struct {
|
type lsSnapshot struct {
|
||||||
@@ -181,7 +183,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
|
|||||||
)
|
)
|
||||||
|
|
||||||
if gopts.JSON {
|
if gopts.JSON {
|
||||||
enc := json.NewEncoder(gopts.stdout)
|
enc := json.NewEncoder(globalOptions.stdout)
|
||||||
|
|
||||||
printSnapshot = func(sn *restic.Snapshot) {
|
printSnapshot = func(sn *restic.Snapshot) {
|
||||||
err := enc.Encode(lsSnapshot{
|
err := enc.Encode(lsSnapshot{
|
||||||
@@ -206,11 +208,20 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
|
|||||||
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
|
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
|
||||||
}
|
}
|
||||||
printNode = func(path string, node *restic.Node) {
|
printNode = func(path string, node *restic.Node) {
|
||||||
Printf("%s\n", formatNode(path, node, lsOptions.ListLong))
|
Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sn, err := restic.FindFilteredSnapshot(ctx, snapshotLister, repo, opts.Hosts, opts.Tags, opts.Paths, nil, args[0])
|
sn, subfolder, err := (&restic.SnapshotFilter{
|
||||||
|
Hosts: opts.Hosts,
|
||||||
|
Paths: opts.Paths,
|
||||||
|
Tags: opts.Tags,
|
||||||
|
}).FindLatest(ctx, snapshotLister, repo, args[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
19
cmd/restic/cmd_ls_integration_test.go
Normal file
19
cmd/restic/cmd_ls_integration_test.go
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
gopts.Quiet = true
|
||||||
|
opts := LsOptions{}
|
||||||
|
return runLs(context.TODO(), opts, gopts, []string{snapshotID})
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
return strings.Split(buf.String(), "\n")
|
||||||
|
}
|
@@ -122,7 +122,7 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
lock, ctx, err := lockRepoExclusive(ctx, repo)
|
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@@ -77,7 +77,7 @@ type MountOptions struct {
|
|||||||
OwnerRoot bool
|
OwnerRoot bool
|
||||||
AllowOther bool
|
AllowOther bool
|
||||||
NoDefaultPermissions bool
|
NoDefaultPermissions bool
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
TimeTemplate string
|
TimeTemplate string
|
||||||
PathTemplates []string
|
PathTemplates []string
|
||||||
}
|
}
|
||||||
@@ -92,7 +92,7 @@ func init() {
|
|||||||
mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory")
|
mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory")
|
||||||
mountFlags.BoolVar(&mountOptions.NoDefaultPermissions, "no-default-permissions", false, "for 'allow-other', ignore Unix permissions and allow users to read all snapshot files")
|
mountFlags.BoolVar(&mountOptions.NoDefaultPermissions, "no-default-permissions", false, "for 'allow-other', ignore Unix permissions and allow users to read all snapshot files")
|
||||||
|
|
||||||
initMultiSnapshotFilterOptions(mountFlags, &mountOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(mountFlags, &mountOptions.SnapshotFilter, true)
|
||||||
|
|
||||||
mountFlags.StringArrayVar(&mountOptions.PathTemplates, "path-template", nil, "set `template` for path names (can be specified multiple times)")
|
mountFlags.StringArrayVar(&mountOptions.PathTemplates, "path-template", nil, "set `template` for path names (can be specified multiple times)")
|
||||||
mountFlags.StringVar(&mountOptions.TimeTemplate, "snapshot-template", time.RFC3339, "set `template` to use for snapshot dirs")
|
mountFlags.StringVar(&mountOptions.TimeTemplate, "snapshot-template", time.RFC3339, "set `template` to use for snapshot dirs")
|
||||||
@@ -123,7 +123,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -180,9 +180,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
|
|||||||
|
|
||||||
cfg := fuse.Config{
|
cfg := fuse.Config{
|
||||||
OwnerIsRoot: opts.OwnerRoot,
|
OwnerIsRoot: opts.OwnerRoot,
|
||||||
Hosts: opts.Hosts,
|
Filter: opts.SnapshotFilter,
|
||||||
Tags: opts.Tags,
|
|
||||||
Paths: opts.Paths,
|
|
||||||
TimeTemplate: opts.TimeTemplate,
|
TimeTemplate: opts.TimeTemplate,
|
||||||
PathTemplates: opts.PathTemplates,
|
PathTemplates: opts.PathTemplates,
|
||||||
}
|
}
|
||||||
|
@@ -12,6 +12,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/debug"
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
rtest "github.com/restic/restic/internal/test"
|
rtest "github.com/restic/restic/internal/test"
|
||||||
@@ -63,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
|
|||||||
rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
|
rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
|
func testRunUmount(t testing.TB, dir string) {
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < mountWait; i++ {
|
for i := 0; i < mountWait; i++ {
|
||||||
if err = umount(dir); err == nil {
|
if err = umount(dir); err == nil {
|
||||||
@@ -94,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
|
|||||||
go testRunMount(t, global, mountpoint, &wg)
|
go testRunMount(t, global, mountpoint, &wg)
|
||||||
waitForMount(t, mountpoint)
|
waitForMount(t, mountpoint)
|
||||||
defer wg.Wait()
|
defer wg.Wait()
|
||||||
defer testRunUmount(t, global, mountpoint)
|
defer testRunUmount(t, mountpoint)
|
||||||
|
|
||||||
if !snapshotsDirExists(t, mountpoint) {
|
if !snapshotsDirExists(t, mountpoint) {
|
||||||
t.Fatal(`virtual directory "snapshots" doesn't exist`)
|
t.Fatal(`virtual directory "snapshots" doesn't exist`)
|
||||||
@@ -159,6 +160,11 @@ func TestMount(t *testing.T) {
|
|||||||
t.Skip("Skipping fuse tests")
|
t.Skip("Skipping fuse tests")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
debugEnabled := debug.TestLogToStderr(t)
|
||||||
|
if debugEnabled {
|
||||||
|
defer debug.TestDisableLog(t)
|
||||||
|
}
|
||||||
|
|
||||||
env, cleanup := withTestEnvironment(t)
|
env, cleanup := withTestEnvironment(t)
|
||||||
// must list snapshots more than once
|
// must list snapshots more than once
|
||||||
env.gopts.backendTestHook = nil
|
env.gopts.backendTestHook = nil
|
@@ -3,6 +3,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"math"
|
"math"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -80,7 +81,7 @@ func addPruneOptions(c *cobra.Command) {
|
|||||||
func verifyPruneOptions(opts *PruneOptions) error {
|
func verifyPruneOptions(opts *PruneOptions) error {
|
||||||
opts.MaxRepackBytes = math.MaxUint64
|
opts.MaxRepackBytes = math.MaxUint64
|
||||||
if len(opts.MaxRepackSize) > 0 {
|
if len(opts.MaxRepackSize) > 0 {
|
||||||
size, err := parseSizeStr(opts.MaxRepackSize)
|
size, err := ui.ParseBytes(opts.MaxRepackSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -123,7 +124,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
size, err := parseSizeStr(maxUnused)
|
size, err := ui.ParseBytes(maxUnused)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
|
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
|
||||||
}
|
}
|
||||||
@@ -167,7 +168,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
|
|||||||
opts.unsafeRecovery = true
|
opts.unsafeRecovery = true
|
||||||
}
|
}
|
||||||
|
|
||||||
lock, ctx, err := lockRepoExclusive(ctx, repo)
|
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -205,6 +206,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Trigger GC to reset garbage collection threshold
|
||||||
|
runtime.GC()
|
||||||
|
|
||||||
return doPrune(ctx, opts, gopts, repo, plan)
|
return doPrune(ctx, opts, gopts, repo, plan)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -488,7 +492,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi
|
|||||||
// Pack size does not fit and pack is needed => error
|
// Pack size does not fit and pack is needed => error
|
||||||
// If the pack is not needed, this is no error, the pack can
|
// If the pack is not needed, this is no error, the pack can
|
||||||
// and will be simply removed, see below.
|
// and will be simply removed, see below.
|
||||||
Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic rebuild-index'.\n",
|
Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
|
||||||
id.Str(), p.unusedSize+p.usedSize, packSize)
|
id.Str(), p.unusedSize+p.usedSize, packSize)
|
||||||
return errorSizeNotMatching
|
return errorSizeNotMatching
|
||||||
}
|
}
|
||||||
@@ -729,7 +733,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
|
|||||||
_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
|
_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
|
||||||
bar.Done()
|
bar.Done()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("%s", err)
|
return errors.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Also remove repacked packs
|
// Also remove repacked packs
|
||||||
|
221
cmd/restic/cmd_prune_integration_test.go
Normal file
221
cmd/restic/cmd_prune_integration_test.go
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
|
||||||
|
oldHook := gopts.backendTestHook
|
||||||
|
gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
|
||||||
|
defer func() {
|
||||||
|
gopts.backendTestHook = oldHook
|
||||||
|
}()
|
||||||
|
rtest.OK(t, runPrune(context.TODO(), opts, gopts))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrune(t *testing.T) {
|
||||||
|
testPruneVariants(t, false)
|
||||||
|
testPruneVariants(t, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
|
||||||
|
suffix := ""
|
||||||
|
if unsafeNoSpaceRecovery {
|
||||||
|
suffix = "-recovery"
|
||||||
|
}
|
||||||
|
t.Run("0"+suffix, func(t *testing.T) {
|
||||||
|
opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
|
||||||
|
checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
|
||||||
|
testPrune(t, opts, checkOpts)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("50"+suffix, func(t *testing.T) {
|
||||||
|
opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
|
||||||
|
checkOpts := CheckOptions{ReadData: true}
|
||||||
|
testPrune(t, opts, checkOpts)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unlimited"+suffix, func(t *testing.T) {
|
||||||
|
opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
|
||||||
|
checkOpts := CheckOptions{ReadData: true}
|
||||||
|
testPrune(t, opts, checkOpts)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CachableOnly"+suffix, func(t *testing.T) {
|
||||||
|
opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
|
||||||
|
checkOpts := CheckOptions{ReadData: true}
|
||||||
|
testPrune(t, opts, checkOpts)
|
||||||
|
})
|
||||||
|
t.Run("Small", func(t *testing.T) {
|
||||||
|
opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
|
||||||
|
checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
|
||||||
|
testPrune(t, opts, checkOpts)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func createPrunableRepo(t *testing.T, env *testEnvironment) {
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
|
||||||
|
firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 3)
|
||||||
|
|
||||||
|
testRunForgetJSON(t, env.gopts)
|
||||||
|
testRunForget(t, env.gopts, firstSnapshot.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
gopts.JSON = true
|
||||||
|
opts := ForgetOptions{
|
||||||
|
DryRun: true,
|
||||||
|
Last: 1,
|
||||||
|
}
|
||||||
|
return runForget(context.TODO(), opts, gopts, args)
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
var forgets []*ForgetGroup
|
||||||
|
rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
|
||||||
|
|
||||||
|
rtest.Assert(t, len(forgets) == 1,
|
||||||
|
"Expected 1 snapshot group, got %v", len(forgets))
|
||||||
|
rtest.Assert(t, len(forgets[0].Keep) == 1,
|
||||||
|
"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
|
||||||
|
rtest.Assert(t, len(forgets[0].Remove) == 2,
|
||||||
|
"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
createPrunableRepo(t, env)
|
||||||
|
testRunPrune(t, env.gopts, pruneOpts)
|
||||||
|
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
|
||||||
|
|
||||||
|
func TestPruneWithDamagedRepository(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("testdata", "backup-data.tar.gz")
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
rtest.SetupTarTestFixture(t, env.testdata, datafile)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// create and delete snapshot to create unused blobs
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
|
||||||
|
firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
testRunForget(t, env.gopts, firstSnapshot.String())
|
||||||
|
|
||||||
|
oldPacks := listPacks(env.gopts, t)
|
||||||
|
|
||||||
|
// create new snapshot, but lose all data
|
||||||
|
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
removePacksExcept(env.gopts, t, oldPacks, false)
|
||||||
|
|
||||||
|
oldHook := env.gopts.backendTestHook
|
||||||
|
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
|
||||||
|
defer func() {
|
||||||
|
env.gopts.backendTestHook = oldHook
|
||||||
|
}()
|
||||||
|
// prune should fail
|
||||||
|
rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
|
||||||
|
"prune should have reported index not complete error")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test repos for edge cases
|
||||||
|
func TestEdgeCaseRepos(t *testing.T) {
|
||||||
|
opts := CheckOptions{}
|
||||||
|
|
||||||
|
// repo where index is completely missing
|
||||||
|
// => check and prune should fail
|
||||||
|
t.Run("no-index", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo where an existing and used blob is missing from the index
|
||||||
|
// => check and prune should fail
|
||||||
|
t.Run("index-missing-blob", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo where a blob is missing
|
||||||
|
// => check and prune should fail
|
||||||
|
t.Run("missing-data", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo where blobs which are not needed are missing or in invalid pack files
|
||||||
|
// => check should fail and prune should repair this
|
||||||
|
t.Run("missing-unused-data", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo where data exists that is not referenced
|
||||||
|
// => check and prune should fully work
|
||||||
|
t.Run("unreferenced-data", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo where an obsolete index still exists
|
||||||
|
// => check and prune should fully work
|
||||||
|
t.Run("obsolete-index", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo which contains mixed (data/tree) packs
|
||||||
|
// => check and prune should fully work
|
||||||
|
t.Run("mixed-packs", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
// repo which contains duplicate blobs
|
||||||
|
// => checking for unused data should report an error and prune resolves the
|
||||||
|
// situation
|
||||||
|
opts = CheckOptions{
|
||||||
|
ReadData: true,
|
||||||
|
CheckUnused: true,
|
||||||
|
}
|
||||||
|
t.Run("duplicates", func(t *testing.T) {
|
||||||
|
testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("testdata", tarfile)
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
if checkOK {
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
} else {
|
||||||
|
rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
|
||||||
|
"check should have reported an error")
|
||||||
|
}
|
||||||
|
|
||||||
|
if pruneOK {
|
||||||
|
testRunPrune(t, env.gopts, optionsPrune)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
} else {
|
||||||
|
rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
|
||||||
|
"prune should have reported an error")
|
||||||
|
}
|
||||||
|
}
|
@@ -46,7 +46,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
lock, ctx, err := lockRepo(ctx, repo)
|
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
14
cmd/restic/cmd_repair.go
Normal file
14
cmd/restic/cmd_repair.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var cmdRepair = &cobra.Command{
|
||||||
|
Use: "repair",
|
||||||
|
Short: "Repair the repository",
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
cmdRoot.AddCommand(cmdRepair)
|
||||||
|
}
|
@@ -7,15 +7,15 @@ import (
|
|||||||
"github.com/restic/restic/internal/pack"
|
"github.com/restic/restic/internal/pack"
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
var cmdRebuildIndex = &cobra.Command{
|
var cmdRepairIndex = &cobra.Command{
|
||||||
Use: "rebuild-index [flags]",
|
Use: "index [flags]",
|
||||||
Short: "Build a new index",
|
Short: "Build a new index",
|
||||||
Long: `
|
Long: `
|
||||||
The "rebuild-index" command creates a new index based on the pack files in the
|
The "repair index" command creates a new index based on the pack files in the
|
||||||
repository.
|
repository.
|
||||||
|
|
||||||
EXIT STATUS
|
EXIT STATUS
|
||||||
@@ -25,40 +25,52 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runRebuildIndex(cmd.Context(), rebuildIndexOptions, globalOptions)
|
return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// RebuildIndexOptions collects all options for the rebuild-index command.
|
var cmdRebuildIndex = &cobra.Command{
|
||||||
type RebuildIndexOptions struct {
|
Use: "rebuild-index [flags]",
|
||||||
|
Short: cmdRepairIndex.Short,
|
||||||
|
Long: cmdRepairIndex.Long,
|
||||||
|
Deprecated: `Use "repair index" instead`,
|
||||||
|
DisableAutoGenTag: true,
|
||||||
|
RunE: cmdRepairIndex.RunE,
|
||||||
|
}
|
||||||
|
|
||||||
|
// RepairIndexOptions collects all options for the repair index command.
|
||||||
|
type RepairIndexOptions struct {
|
||||||
ReadAllPacks bool
|
ReadAllPacks bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var rebuildIndexOptions RebuildIndexOptions
|
var repairIndexOptions RepairIndexOptions
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
cmdRepair.AddCommand(cmdRepairIndex)
|
||||||
|
// add alias for old name
|
||||||
cmdRoot.AddCommand(cmdRebuildIndex)
|
cmdRoot.AddCommand(cmdRebuildIndex)
|
||||||
f := cmdRebuildIndex.Flags()
|
|
||||||
f.BoolVar(&rebuildIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
|
|
||||||
|
|
||||||
|
for _, f := range []*pflag.FlagSet{cmdRepairIndex.Flags(), cmdRebuildIndex.Flags()} {
|
||||||
|
f.BoolVar(&repairIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runRebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions) error {
|
func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
|
||||||
repo, err := OpenRepository(ctx, gopts)
|
repo, err := OpenRepository(ctx, gopts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
lock, ctx, err := lockRepoExclusive(ctx, repo)
|
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return rebuildIndex(ctx, opts, gopts, repo, restic.NewIDSet())
|
return rebuildIndex(ctx, opts, gopts, repo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions, repo *repository.Repository, ignorePacks restic.IDSet) error {
|
func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
|
||||||
var obsoleteIndexes restic.IDs
|
var obsoleteIndexes restic.IDs
|
||||||
packSizeFromList := make(map[restic.ID]int64)
|
packSizeFromList := make(map[restic.ID]int64)
|
||||||
packSizeFromIndex := make(map[restic.ID]int64)
|
packSizeFromIndex := make(map[restic.ID]int64)
|
||||||
@@ -130,7 +142,7 @@ func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOpt
|
|||||||
|
|
||||||
if len(packSizeFromList) > 0 {
|
if len(packSizeFromList) > 0 {
|
||||||
Verbosef("reading pack files\n")
|
Verbosef("reading pack files\n")
|
||||||
bar := newProgressMax(!globalOptions.Quiet, uint64(len(packSizeFromList)), "packs")
|
bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs")
|
||||||
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
|
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
|
||||||
bar.Done()
|
bar.Done()
|
||||||
if err != nil {
|
if err != nil {
|
140
cmd/restic/cmd_repair_index_integration_test.go
Normal file
140
cmd/restic/cmd_repair_index_integration_test.go
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/errors"
|
||||||
|
"github.com/restic/restic/internal/index"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
|
||||||
|
rtest.OK(t, withRestoreGlobalOptions(func() error {
|
||||||
|
globalOptions.stdout = io.Discard
|
||||||
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
out, err := testRunCheckOutput(env.gopts, false)
|
||||||
|
if !strings.Contains(out, "contained in several indexes") {
|
||||||
|
t.Fatalf("did not find checker hint for packs in several indexes")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error from checker for test repository, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(out, "restic repair index") {
|
||||||
|
t.Fatalf("did not find hint for repair index command")
|
||||||
|
}
|
||||||
|
|
||||||
|
env.gopts.backendTestHook = backendTestHook
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
|
||||||
|
env.gopts.backendTestHook = nil
|
||||||
|
out, err = testRunCheckOutput(env.gopts, false)
|
||||||
|
if len(out) != 0 {
|
||||||
|
t.Fatalf("expected no output from the checker, got: %v", out)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error from checker after repair index, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRebuildIndex(t *testing.T) {
|
||||||
|
testRebuildIndex(t, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRebuildIndexAlwaysFull(t *testing.T) {
|
||||||
|
indexFull := index.IndexFull
|
||||||
|
defer func() {
|
||||||
|
index.IndexFull = indexFull
|
||||||
|
}()
|
||||||
|
index.IndexFull = func(*index.Index, bool) bool { return true }
|
||||||
|
testRebuildIndex(t, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexErrorBackend modifies the first index after reading.
|
||||||
|
type indexErrorBackend struct {
|
||||||
|
restic.Backend
|
||||||
|
lock sync.Mutex
|
||||||
|
hasErred bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
|
||||||
|
return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
|
||||||
|
// protect hasErred
|
||||||
|
b.lock.Lock()
|
||||||
|
defer b.lock.Unlock()
|
||||||
|
if !b.hasErred && h.Type == restic.IndexFile {
|
||||||
|
b.hasErred = true
|
||||||
|
return consumer(errorReadCloser{rd})
|
||||||
|
}
|
||||||
|
return consumer(rd)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorReadCloser struct {
|
||||||
|
io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (erd errorReadCloser) Read(p []byte) (int, error) {
|
||||||
|
n, err := erd.Reader.Read(p)
|
||||||
|
if n > 0 {
|
||||||
|
p[0] ^= 1
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRebuildIndexDamage(t *testing.T) {
|
||||||
|
testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
|
||||||
|
return &indexErrorBackend{
|
||||||
|
Backend: r,
|
||||||
|
}, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type appendOnlyBackend struct {
|
||||||
|
restic.Backend
|
||||||
|
}
|
||||||
|
|
||||||
|
// called via repo.Backend().Remove()
|
||||||
|
func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error {
|
||||||
|
return errors.Errorf("Failed to remove %v", h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
err := withRestoreGlobalOptions(func() error {
|
||||||
|
globalOptions.stdout = io.Discard
|
||||||
|
|
||||||
|
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
|
||||||
|
return &appendOnlyBackend{r}, nil
|
||||||
|
}
|
||||||
|
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)
|
||||||
|
})
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected rebuildIndex to fail")
|
||||||
|
}
|
||||||
|
t.Log(err)
|
||||||
|
}
|
176
cmd/restic/cmd_repair_snapshots.go
Normal file
176
cmd/restic/cmd_repair_snapshots.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/backend"
|
||||||
|
"github.com/restic/restic/internal/errors"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/walker"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var cmdRepairSnapshots = &cobra.Command{
|
||||||
|
Use: "snapshots [flags] [snapshot ID] [...]",
|
||||||
|
Short: "Repair snapshots",
|
||||||
|
Long: `
|
||||||
|
The "repair snapshots" command repairs broken snapshots. It scans the given
|
||||||
|
snapshots and generates new ones with damaged directories and file contents
|
||||||
|
removed. If the broken snapshots are deleted, a prune run will be able to
|
||||||
|
clean up the repository.
|
||||||
|
|
||||||
|
The command depends on a correct index, thus make sure to run "repair index"
|
||||||
|
first!
|
||||||
|
|
||||||
|
|
||||||
|
WARNING
|
||||||
|
=======
|
||||||
|
|
||||||
|
Repairing and deleting broken snapshots causes data loss! It will remove broken
|
||||||
|
directories and modify broken files in the modified snapshots.
|
||||||
|
|
||||||
|
If the contents of directories and files are still available, the better option
|
||||||
|
is to run "backup" which in that case is able to heal existing snapshots. Only
|
||||||
|
use the "repair snapshots" command if you need to recover an old and broken
|
||||||
|
snapshot!
|
||||||
|
|
||||||
|
EXIT STATUS
|
||||||
|
===========
|
||||||
|
|
||||||
|
Exit status is 0 if the command was successful, and non-zero if there was any error.
|
||||||
|
`,
|
||||||
|
DisableAutoGenTag: true,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
return runRepairSnapshots(cmd.Context(), globalOptions, repairSnapshotOptions, args)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// RepairOptions collects all options for the repair command.
|
||||||
|
type RepairOptions struct {
|
||||||
|
DryRun bool
|
||||||
|
Forget bool
|
||||||
|
|
||||||
|
restic.SnapshotFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
var repairSnapshotOptions RepairOptions
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
cmdRepair.AddCommand(cmdRepairSnapshots)
|
||||||
|
flags := cmdRepairSnapshots.Flags()
|
||||||
|
|
||||||
|
flags.BoolVarP(&repairSnapshotOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
|
||||||
|
flags.BoolVarP(&repairSnapshotOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
|
||||||
|
|
||||||
|
initMultiSnapshotFilter(flags, &repairSnapshotOptions.SnapshotFilter, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
|
||||||
|
repo, err := OpenRepository(ctx, gopts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.DryRun {
|
||||||
|
var lock *restic.Lock
|
||||||
|
var err error
|
||||||
|
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
|
defer unlockRepo(lock)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
repo.SetDryRun()
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := repo.LoadIndex(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Three error cases are checked:
|
||||||
|
// - tree is a nil tree (-> will be replaced by an empty tree)
|
||||||
|
// - trees which cannot be loaded (-> the tree contents will be removed)
|
||||||
|
// - files whose contents are not fully available (-> file will be modified)
|
||||||
|
rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
|
||||||
|
RewriteNode: func(node *restic.Node, path string) *restic.Node {
|
||||||
|
if node.Type != "file" {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := true
|
||||||
|
var newContent restic.IDs = restic.IDs{}
|
||||||
|
var newSize uint64
|
||||||
|
// check all contents and remove if not available
|
||||||
|
for _, id := range node.Content {
|
||||||
|
if size, found := repo.LookupBlobSize(id, restic.DataBlob); !found {
|
||||||
|
ok = false
|
||||||
|
} else {
|
||||||
|
newContent = append(newContent, id)
|
||||||
|
newSize += uint64(size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
Verbosef(" file %q: removed missing content\n", path)
|
||||||
|
} else if newSize != node.Size {
|
||||||
|
Verbosef(" file %q: fixed incorrect size\n", path)
|
||||||
|
}
|
||||||
|
// no-ops if already correct
|
||||||
|
node.Content = newContent
|
||||||
|
node.Size = newSize
|
||||||
|
return node
|
||||||
|
},
|
||||||
|
RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) {
|
||||||
|
if path == "/" {
|
||||||
|
Verbosef(" dir %q: not readable\n", path)
|
||||||
|
// remove snapshots with invalid root node
|
||||||
|
return restic.ID{}, nil
|
||||||
|
}
|
||||||
|
// If a subtree fails to load, remove it
|
||||||
|
Verbosef(" dir %q: replaced with empty directory\n", path)
|
||||||
|
emptyID, err := restic.SaveTree(ctx, repo, &restic.Tree{})
|
||||||
|
if err != nil {
|
||||||
|
return restic.ID{}, err
|
||||||
|
}
|
||||||
|
return emptyID, nil
|
||||||
|
},
|
||||||
|
AllowUnstableSerialization: true,
|
||||||
|
})
|
||||||
|
|
||||||
|
changedCount := 0
|
||||||
|
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
|
||||||
|
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
|
||||||
|
changed, err := filterAndReplaceSnapshot(ctx, repo, sn,
|
||||||
|
func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
|
||||||
|
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
|
||||||
|
}, opts.DryRun, opts.Forget, "repaired")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)
|
||||||
|
}
|
||||||
|
if changed {
|
||||||
|
changedCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Verbosef("\n")
|
||||||
|
if changedCount == 0 {
|
||||||
|
if !opts.DryRun {
|
||||||
|
Verbosef("no snapshots were modified\n")
|
||||||
|
} else {
|
||||||
|
Verbosef("no snapshots would be modified\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !opts.DryRun {
|
||||||
|
Verbosef("modified %v snapshots\n", changedCount)
|
||||||
|
} else {
|
||||||
|
Verbosef("would modify %v snapshots\n", changedCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
135
cmd/restic/cmd_repair_snapshots_integration_test.go
Normal file
135
cmd/restic/cmd_repair_snapshots_integration_test.go
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"hash/fnv"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunRepairSnapshot(t testing.TB, gopts GlobalOptions, forget bool) {
|
||||||
|
opts := RepairOptions{
|
||||||
|
Forget: forget,
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.OK(t, runRepairSnapshots(context.TODO(), gopts, opts, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func createRandomFile(t testing.TB, env *testEnvironment, path string, size int) {
|
||||||
|
fn := filepath.Join(env.testdata, path)
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(fn), 0o755))
|
||||||
|
|
||||||
|
h := fnv.New64()
|
||||||
|
_, err := h.Write([]byte(path))
|
||||||
|
rtest.OK(t, err)
|
||||||
|
r := rand.New(rand.NewSource(int64(h.Sum64())))
|
||||||
|
|
||||||
|
f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0o644)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
_, err = io.Copy(f, io.LimitReader(r, int64(size)))
|
||||||
|
rtest.OK(t, err)
|
||||||
|
rtest.OK(t, f.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRepairSnapshotsWithLostData(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
createRandomFile(t, env, "foo/bar/file", 512*1024)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
// damage repository
|
||||||
|
removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
|
||||||
|
|
||||||
|
createRandomFile(t, env, "foo/bar/file2", 256*1024)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 2)
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// repair but keep broken snapshots
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
testRunRepairSnapshot(t, env.gopts, false)
|
||||||
|
testListSnapshots(t, env.gopts, 4)
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// repository must be ok after removing the broken snapshots
|
||||||
|
testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String())
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
_, err := testRunCheckOutput(env.gopts, false)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRepairSnapshotsWithLostTree(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
createRandomFile(t, env, "foo/bar/file", 12345)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
oldSnapshot := testListSnapshots(t, env.gopts, 1)
|
||||||
|
oldPacks := testRunList(t, "packs", env.gopts)
|
||||||
|
|
||||||
|
// keep foo/bar unchanged
|
||||||
|
createRandomFile(t, env, "foo/bar2", 1024)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
// remove tree for foo/bar and the now completely broken first snapshot
|
||||||
|
removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
|
||||||
|
testRunForget(t, env.gopts, oldSnapshot[0].String())
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// repair
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
testRunRepairSnapshot(t, env.gopts, true)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
_, err := testRunCheckOutput(env.gopts, false)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRepairSnapshotsWithLostRootTree(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
createRandomFile(t, env, "foo/bar/file", 12345)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
oldPacks := testRunList(t, "packs", env.gopts)
|
||||||
|
|
||||||
|
// remove all trees
|
||||||
|
removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// repair
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
testRunRepairSnapshot(t, env.gopts, true)
|
||||||
|
testListSnapshots(t, env.gopts, 0)
|
||||||
|
_, err := testRunCheckOutput(env.gopts, false)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRepairSnapshotsIntact(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts)
|
||||||
|
oldSnapshotIDs := testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
// use an exclude that will not exclude anything
|
||||||
|
testRunRepairSnapshot(t, env.gopts, false)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 1)
|
||||||
|
rtest.Assert(t, reflect.DeepEqual(oldSnapshotIDs, snapshotIDs), "unexpected snapshot id mismatch %v vs. %v", oldSnapshotIDs, snapshotIDs)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
@@ -3,6 +3,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/debug"
|
"github.com/restic/restic/internal/debug"
|
||||||
@@ -10,6 +11,9 @@ import (
|
|||||||
"github.com/restic/restic/internal/filter"
|
"github.com/restic/restic/internal/filter"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
"github.com/restic/restic/internal/restorer"
|
"github.com/restic/restic/internal/restorer"
|
||||||
|
"github.com/restic/restic/internal/ui"
|
||||||
|
restoreui "github.com/restic/restic/internal/ui/restore"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
@@ -31,7 +35,31 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runRestore(cmd.Context(), restoreOptions, globalOptions, args)
|
ctx := cmd.Context()
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
cancelCtx, cancel := context.WithCancel(ctx)
|
||||||
|
defer func() {
|
||||||
|
// shutdown termstatus
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
term.Run(cancelCtx)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// allow usage of warnf / verbosef
|
||||||
|
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
|
||||||
|
defer func() {
|
||||||
|
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
|
||||||
|
}()
|
||||||
|
stdioWrapper := ui.NewStdioWrapper(term)
|
||||||
|
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
|
||||||
|
|
||||||
|
return runRestore(ctx, restoreOptions, globalOptions, term, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -42,7 +70,7 @@ type RestoreOptions struct {
|
|||||||
Include []string
|
Include []string
|
||||||
InsensitiveInclude []string
|
InsensitiveInclude []string
|
||||||
Target string
|
Target string
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
Sparse bool
|
Sparse bool
|
||||||
Verify bool
|
Verify bool
|
||||||
}
|
}
|
||||||
@@ -59,12 +87,14 @@ func init() {
|
|||||||
flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as `--include` but ignores the casing of filenames")
|
flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as `--include` but ignores the casing of filenames")
|
||||||
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
|
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
|
||||||
|
|
||||||
initSingleSnapshotFilterOptions(flags, &restoreOptions.snapshotFilterOptions)
|
initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter)
|
||||||
flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse")
|
flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse")
|
||||||
flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content")
|
flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content")
|
||||||
}
|
}
|
||||||
|
|
||||||
func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
||||||
|
term *termstatus.Terminal, args []string) error {
|
||||||
|
|
||||||
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
|
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
|
||||||
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
|
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
|
||||||
|
|
||||||
@@ -124,14 +154,18 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sn, err := restic.FindFilteredSnapshot(ctx, repo.Backend(), repo, opts.Hosts, opts.Tags, opts.Paths, nil, snapshotIDString)
|
sn, subfolder, err := (&restic.SnapshotFilter{
|
||||||
|
Hosts: opts.Hosts,
|
||||||
|
Paths: opts.Paths,
|
||||||
|
Tags: opts.Tags,
|
||||||
|
}).FindLatest(ctx, repo.Backend(), repo, snapshotIDString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("failed to find snapshot: %v", err)
|
return errors.Fatalf("failed to find snapshot: %v", err)
|
||||||
}
|
}
|
||||||
@@ -141,11 +175,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
res := restorer.NewRestorer(ctx, repo, sn, opts.Sparse)
|
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := ui.NewMessage(term, gopts.verbosity)
|
||||||
|
var printer restoreui.ProgressPrinter
|
||||||
|
if gopts.JSON {
|
||||||
|
printer = restoreui.NewJSONProgress(term)
|
||||||
|
} else {
|
||||||
|
printer = restoreui.NewTextProgress(term)
|
||||||
|
}
|
||||||
|
|
||||||
|
progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON))
|
||||||
|
res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)
|
||||||
|
|
||||||
totalErrors := 0
|
totalErrors := 0
|
||||||
res.Error = func(location string, err error) error {
|
res.Error = func(location string, err error) error {
|
||||||
Warnf("ignoring error for %s: %s\n", location, err)
|
msg.E("ignoring error for %s: %s\n", location, err)
|
||||||
totalErrors++
|
totalErrors++
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -155,12 +203,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
||||||
matched, err := filter.List(excludePatterns, item)
|
matched, err := filter.List(excludePatterns, item)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error for exclude pattern: %v", err)
|
msg.E("error for exclude pattern: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item))
|
matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error for iexclude pattern: %v", err)
|
msg.E("error for iexclude pattern: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// An exclude filter is basically a 'wildcard but foo',
|
// An exclude filter is basically a 'wildcard but foo',
|
||||||
@@ -178,12 +226,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
||||||
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
|
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error for include pattern: %v", err)
|
msg.E("error for include pattern: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item))
|
matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error for iexclude pattern: %v", err)
|
msg.E("error for iexclude pattern: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
selectedForRestore = matched || matchedInsensitive
|
selectedForRestore = matched || matchedInsensitive
|
||||||
@@ -198,19 +246,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
res.SelectFilter = selectIncludeFilter
|
res.SelectFilter = selectIncludeFilter
|
||||||
}
|
}
|
||||||
|
|
||||||
Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
|
if !gopts.JSON {
|
||||||
|
msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target)
|
||||||
|
}
|
||||||
|
|
||||||
err = res.RestoreTo(ctx, opts.Target)
|
err = res.RestoreTo(ctx, opts.Target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
progress.Finish()
|
||||||
|
|
||||||
if totalErrors > 0 {
|
if totalErrors > 0 {
|
||||||
return errors.Fatalf("There were %d errors\n", totalErrors)
|
return errors.Fatalf("There were %d errors\n", totalErrors)
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.Verify {
|
if opts.Verify {
|
||||||
Verbosef("verifying files in %s\n", opts.Target)
|
if !gopts.JSON {
|
||||||
|
msg.P("verifying files in %s\n", opts.Target)
|
||||||
|
}
|
||||||
var count int
|
var count int
|
||||||
t0 := time.Now()
|
t0 := time.Now()
|
||||||
count, err = res.VerifyFiles(ctx, opts.Target)
|
count, err = res.VerifyFiles(ctx, opts.Target)
|
||||||
@@ -220,8 +274,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
|
|||||||
if totalErrors > 0 {
|
if totalErrors > 0 {
|
||||||
return errors.Fatalf("There were %d errors\n", totalErrors)
|
return errors.Fatalf("There were %d errors\n", totalErrors)
|
||||||
}
|
}
|
||||||
Verbosef("finished verifying %d files in %s (took %s)\n", count, opts.Target,
|
|
||||||
time.Since(t0).Round(time.Millisecond))
|
if !gopts.JSON {
|
||||||
|
msg.P("finished verifying %d files in %s (took %s)\n", count, opts.Target,
|
||||||
|
time.Since(t0).Round(time.Millisecond))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
307
cmd/restic/cmd_restore_integration_test.go
Normal file
307
cmd/restic/cmd_restore_integration_test.go
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
mrand "math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/filter"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
|
||||||
|
testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
|
||||||
|
opts := RestoreOptions{
|
||||||
|
Target: dir,
|
||||||
|
Exclude: excludes,
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
|
||||||
|
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
return runRestore(ctx, opts, gopts, term, []string{snapshotID})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
|
||||||
|
opts := RestoreOptions{
|
||||||
|
Target: dir,
|
||||||
|
SnapshotFilter: restic.SnapshotFilter{
|
||||||
|
Hosts: hosts,
|
||||||
|
Paths: paths,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.OK(t, testRunRestoreAssumeFailure("latest", opts, gopts))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
|
||||||
|
opts := RestoreOptions{
|
||||||
|
Target: dir,
|
||||||
|
Include: includes,
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestoreFilter(t *testing.T) {
|
||||||
|
testfiles := []struct {
|
||||||
|
name string
|
||||||
|
size uint
|
||||||
|
}{
|
||||||
|
{"testfile1.c", 100},
|
||||||
|
{"testfile2.exe", 101},
|
||||||
|
{"subdir1/subdir2/testfile3.docx", 102},
|
||||||
|
{"subdir1/subdir2/testfile4.c", 102},
|
||||||
|
}
|
||||||
|
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
for _, testFile := range testfiles {
|
||||||
|
p := filepath.Join(env.testdata, testFile.name)
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, testFile.size))
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
snapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// no restore filter should restore all files
|
||||||
|
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
|
||||||
|
for _, testFile := range testfiles {
|
||||||
|
rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
|
||||||
|
base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
|
||||||
|
testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
|
||||||
|
for _, testFile := range testfiles {
|
||||||
|
err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
|
||||||
|
if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
|
||||||
|
rtest.OK(t, err)
|
||||||
|
} else {
|
||||||
|
rtest.Assert(t, os.IsNotExist(err),
|
||||||
|
"expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestore(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// Restore latest without any filters
|
||||||
|
restoredir := filepath.Join(env.base, "restore")
|
||||||
|
testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)
|
||||||
|
|
||||||
|
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
|
||||||
|
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestoreLatest(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "testfile.c")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 100))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// chdir manually here so we can get the current directory. This is not the
|
||||||
|
// same as the temp dir returned by os.MkdirTemp() on darwin.
|
||||||
|
back := rtest.Chdir(t, filepath.Dir(env.testdata))
|
||||||
|
defer back()
|
||||||
|
|
||||||
|
curdir, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
rtest.OK(t, os.Remove(p))
|
||||||
|
rtest.OK(t, appendRandomData(p, 101))
|
||||||
|
testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// Restore latest without any filters
|
||||||
|
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
|
||||||
|
rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
|
||||||
|
|
||||||
|
// Setup test files in different directories backed up in different snapshots
|
||||||
|
p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
|
||||||
|
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p1, 102))
|
||||||
|
testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
|
||||||
|
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p2, 103))
|
||||||
|
testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
|
||||||
|
p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
|
||||||
|
|
||||||
|
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
|
||||||
|
rtest.OK(t, testFileSize(p1rAbs, int64(102)))
|
||||||
|
if _, err := os.Stat(p2rAbs); os.IsNotExist(err) {
|
||||||
|
rtest.Assert(t, os.IsNotExist(err),
|
||||||
|
"expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
|
||||||
|
rtest.OK(t, testFileSize(p2rAbs, int64(103)))
|
||||||
|
if _, err := os.Stat(p1rAbs); os.IsNotExist(err) {
|
||||||
|
rtest.Assert(t, os.IsNotExist(err),
|
||||||
|
"expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestoreWithPermissionFailure(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
snapshots := testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
_ = withRestoreGlobalOptions(func() error {
|
||||||
|
globalOptions.stderr = io.Discard
|
||||||
|
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// make sure that all files have been restored, regardless of any
|
||||||
|
// permission errors
|
||||||
|
files := testRunLs(t, env.gopts, snapshots[0].String())
|
||||||
|
for _, filename := range files {
|
||||||
|
fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
|
||||||
|
"file %v restored, but filesize is 0", filename)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setZeroModTime(filename string) error {
|
||||||
|
var utimes = []syscall.Timespec{
|
||||||
|
syscall.NsecToTimespec(0),
|
||||||
|
syscall.NsecToTimespec(0),
|
||||||
|
}
|
||||||
|
|
||||||
|
return syscall.UtimesNano(filename, utimes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 200))
|
||||||
|
rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
snapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// restore with filter "*.ext", this should restore "file.ext", but
|
||||||
|
// since the directories are ignored and only created because of
|
||||||
|
// "file.ext", no meta data should be restored for them.
|
||||||
|
testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
|
||||||
|
|
||||||
|
f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
|
||||||
|
_, err := os.Stat(f1)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
// restore with filter "*", this should restore meta data on everything.
|
||||||
|
testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
|
||||||
|
|
||||||
|
f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
|
||||||
|
fi, err := os.Stat(f2)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
|
||||||
|
"meta data of intermediate directory hasn't been restore")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestoreLocalLayout(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
var tests = []struct {
|
||||||
|
filename string
|
||||||
|
layout string
|
||||||
|
}{
|
||||||
|
{"repo-layout-default.tar.gz", ""},
|
||||||
|
{"repo-layout-s3legacy.tar.gz", ""},
|
||||||
|
{"repo-layout-default.tar.gz", "default"},
|
||||||
|
{"repo-layout-s3legacy.tar.gz", "s3legacy"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
|
||||||
|
|
||||||
|
rtest.SetupTarTestFixture(t, env.base, datafile)
|
||||||
|
|
||||||
|
env.gopts.extended["local.layout"] = test.layout
|
||||||
|
|
||||||
|
// check the repo
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// restore latest snapshot
|
||||||
|
target := filepath.Join(env.base, "restore")
|
||||||
|
testRunRestoreLatest(t, env.gopts, target, nil, nil)
|
||||||
|
|
||||||
|
rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
|
||||||
|
rtest.RemoveAll(t, target)
|
||||||
|
}
|
||||||
|
}
|
@@ -51,7 +51,7 @@ type RewriteOptions struct {
|
|||||||
Forget bool
|
Forget bool
|
||||||
DryRun bool
|
DryRun bool
|
||||||
|
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
excludePatternOptions
|
excludePatternOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -64,7 +64,7 @@ func init() {
|
|||||||
f.BoolVarP(&rewriteOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
|
f.BoolVarP(&rewriteOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
|
||||||
f.BoolVarP(&rewriteOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
|
f.BoolVarP(&rewriteOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
|
||||||
|
|
||||||
initMultiSnapshotFilterOptions(f, &rewriteOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true)
|
||||||
initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions)
|
initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,36 +87,67 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
|
||||||
|
RewriteNode: func(node *restic.Node, path string) *restic.Node {
|
||||||
|
if selectByName(path) {
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
Verbosef(fmt.Sprintf("excluding %s\n", path))
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
DisableNodeCache: true,
|
||||||
|
})
|
||||||
|
|
||||||
|
return filterAndReplaceSnapshot(ctx, repo, sn,
|
||||||
|
func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
|
||||||
|
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
|
||||||
|
}, opts.DryRun, opts.Forget, "rewrite")
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, filter func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error), dryRun bool, forget bool, addTag string) (bool, error) {
|
||||||
|
|
||||||
wg, wgCtx := errgroup.WithContext(ctx)
|
wg, wgCtx := errgroup.WithContext(ctx)
|
||||||
repo.StartPackUploader(wgCtx, wg)
|
repo.StartPackUploader(wgCtx, wg)
|
||||||
|
|
||||||
var filteredTree restic.ID
|
var filteredTree restic.ID
|
||||||
wg.Go(func() error {
|
wg.Go(func() error {
|
||||||
filteredTree, err = walker.FilterTree(wgCtx, repo, "/", *sn.Tree, &walker.TreeFilterVisitor{
|
var err error
|
||||||
SelectByName: selectByName,
|
filteredTree, err = filter(ctx, sn)
|
||||||
PrintExclude: func(path string) { Verbosef(fmt.Sprintf("excluding %s\n", path)) },
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return repo.Flush(wgCtx)
|
return repo.Flush(wgCtx)
|
||||||
})
|
})
|
||||||
err = wg.Wait()
|
err := wg.Wait()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if filteredTree.IsNull() {
|
||||||
|
if dryRun {
|
||||||
|
Verbosef("would delete empty snapshot\n")
|
||||||
|
} else {
|
||||||
|
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||||
|
if err = repo.Backend().Remove(ctx, h); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
debug.Log("removed empty snapshot %v", sn.ID())
|
||||||
|
Verbosef("removed empty snapshot %v\n", sn.ID().Str())
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
if filteredTree == *sn.Tree {
|
if filteredTree == *sn.Tree {
|
||||||
debug.Log("Snapshot %v not modified", sn)
|
debug.Log("Snapshot %v not modified", sn)
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
debug.Log("Snapshot %v modified", sn)
|
debug.Log("Snapshot %v modified", sn)
|
||||||
if opts.DryRun {
|
if dryRun {
|
||||||
Verbosef("would save new snapshot\n")
|
Verbosef("would save new snapshot\n")
|
||||||
|
|
||||||
if opts.Forget {
|
if forget {
|
||||||
Verbosef("would remove old snapshot\n")
|
Verbosef("would remove old snapshot\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -125,10 +156,10 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
|
|||||||
|
|
||||||
// Always set the original snapshot id as this essentially a new snapshot.
|
// Always set the original snapshot id as this essentially a new snapshot.
|
||||||
sn.Original = sn.ID()
|
sn.Original = sn.ID()
|
||||||
*sn.Tree = filteredTree
|
sn.Tree = &filteredTree
|
||||||
|
|
||||||
if !opts.Forget {
|
if !forget {
|
||||||
sn.AddTags([]string{"rewrite"})
|
sn.AddTags([]string{addTag})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the new snapshot.
|
// Save the new snapshot.
|
||||||
@@ -136,8 +167,9 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
Verbosef("saved new snapshot %v\n", id.Str())
|
||||||
|
|
||||||
if opts.Forget {
|
if forget {
|
||||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||||
if err = repo.Backend().Remove(ctx, h); err != nil {
|
if err = repo.Backend().Remove(ctx, h); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -145,7 +177,6 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
|
|||||||
debug.Log("removed old snapshot %v", sn.ID())
|
debug.Log("removed old snapshot %v", sn.ID())
|
||||||
Verbosef("removed old snapshot %v\n", sn.ID().Str())
|
Verbosef("removed old snapshot %v\n", sn.ID().Str())
|
||||||
}
|
}
|
||||||
Verbosef("saved new snapshot %v\n", id.Str())
|
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -164,9 +195,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
|
|||||||
var err error
|
var err error
|
||||||
if opts.Forget {
|
if opts.Forget {
|
||||||
Verbosef("create exclusive lock for repository\n")
|
Verbosef("create exclusive lock for repository\n")
|
||||||
lock, ctx, err = lockRepoExclusive(ctx, repo)
|
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
} else {
|
} else {
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
}
|
}
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -186,7 +217,7 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
|
|||||||
}
|
}
|
||||||
|
|
||||||
changedCount := 0
|
changedCount := 0
|
||||||
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
|
||||||
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
|
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
|
||||||
changed, err := rewriteSnapshot(ctx, repo, sn, opts)
|
changed, err := rewriteSnapshot(ctx, repo, sn, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -32,11 +32,11 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
|
|
||||||
// SnapshotOptions bundles all options for the snapshots command.
|
// SnapshotOptions bundles all options for the snapshots command.
|
||||||
type SnapshotOptions struct {
|
type SnapshotOptions struct {
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
Compact bool
|
Compact bool
|
||||||
Last bool // This option should be removed in favour of Latest.
|
Last bool // This option should be removed in favour of Latest.
|
||||||
Latest int
|
Latest int
|
||||||
GroupBy string
|
GroupBy restic.SnapshotGroupByOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
var snapshotOptions SnapshotOptions
|
var snapshotOptions SnapshotOptions
|
||||||
@@ -45,7 +45,7 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdSnapshots)
|
cmdRoot.AddCommand(cmdSnapshots)
|
||||||
|
|
||||||
f := cmdSnapshots.Flags()
|
f := cmdSnapshots.Flags()
|
||||||
initMultiSnapshotFilterOptions(f, &snapshotOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(f, &snapshotOptions.SnapshotFilter, true)
|
||||||
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact output format")
|
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact output format")
|
||||||
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
|
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
|
||||||
err := f.MarkDeprecated("last", "use --latest 1")
|
err := f.MarkDeprecated("last", "use --latest 1")
|
||||||
@@ -54,7 +54,7 @@ func init() {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
f.IntVar(&snapshotOptions.Latest, "latest", 0, "only show the last `n` snapshots for each host and path")
|
f.IntVar(&snapshotOptions.Latest, "latest", 0, "only show the last `n` snapshots for each host and path")
|
||||||
f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "`group` snapshots by host, paths and/or tags, separated by comma")
|
f.VarP(&snapshotOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma")
|
||||||
}
|
}
|
||||||
|
|
||||||
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
|
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
|
||||||
@@ -65,7 +65,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -73,7 +73,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
|
|||||||
}
|
}
|
||||||
|
|
||||||
var snapshots restic.Snapshots
|
var snapshots restic.Snapshots
|
||||||
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
|
||||||
snapshots = append(snapshots, sn)
|
snapshots = append(snapshots, sn)
|
||||||
}
|
}
|
||||||
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||||
@@ -94,7 +94,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
|
|||||||
}
|
}
|
||||||
|
|
||||||
if gopts.JSON {
|
if gopts.JSON {
|
||||||
err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
|
err := printSnapshotGroupJSON(globalOptions.stdout, snapshotGroups, grouped)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error printing snapshots: %v\n", err)
|
Warnf("error printing snapshots: %v\n", err)
|
||||||
}
|
}
|
||||||
@@ -103,13 +103,13 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
|
|||||||
|
|
||||||
for k, list := range snapshotGroups {
|
for k, list := range snapshotGroups {
|
||||||
if grouped {
|
if grouped {
|
||||||
err := PrintSnapshotGroupHeader(gopts.stdout, k)
|
err := PrintSnapshotGroupHeader(globalOptions.stdout, k)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("error printing snapshots: %v\n", err)
|
Warnf("error printing snapshots: %v\n", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
|
PrintSnapshots(globalOptions.stdout, list, nil, opts.Compact)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
32
cmd/restic/cmd_snapshots_integration_test.go
Normal file
32
cmd/restic/cmd_snapshots_integration_test.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
gopts.JSON = true
|
||||||
|
|
||||||
|
opts := SnapshotOptions{}
|
||||||
|
return runSnapshots(context.TODO(), opts, gopts, []string{})
|
||||||
|
})
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
snapshots := []Snapshot{}
|
||||||
|
rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
|
||||||
|
|
||||||
|
snapmap = make(map[restic.ID]Snapshot, len(snapshots))
|
||||||
|
for _, sn := range snapshots {
|
||||||
|
snapmap[*sn.ID] = sn
|
||||||
|
if newest == nil || sn.Time.After(newest.Time) {
|
||||||
|
newest = &sn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
@@ -5,11 +5,15 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/restic/chunker"
|
||||||
"github.com/restic/restic/internal/backend"
|
"github.com/restic/restic/internal/backend"
|
||||||
"github.com/restic/restic/internal/crypto"
|
"github.com/restic/restic/internal/crypto"
|
||||||
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
"github.com/restic/restic/internal/ui"
|
"github.com/restic/restic/internal/ui"
|
||||||
|
"github.com/restic/restic/internal/ui/table"
|
||||||
"github.com/restic/restic/internal/walker"
|
"github.com/restic/restic/internal/walker"
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
@@ -49,7 +53,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
`,
|
`,
|
||||||
DisableAutoGenTag: true,
|
DisableAutoGenTag: true,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runStats(cmd.Context(), globalOptions, args)
|
return runStats(cmd.Context(), statsOptions, globalOptions, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,7 +62,7 @@ type StatsOptions struct {
|
|||||||
// the mode of counting to perform (see consts for available modes)
|
// the mode of counting to perform (see consts for available modes)
|
||||||
countMode string
|
countMode string
|
||||||
|
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
}
|
}
|
||||||
|
|
||||||
var statsOptions StatsOptions
|
var statsOptions StatsOptions
|
||||||
@@ -67,11 +71,11 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdStats)
|
cmdRoot.AddCommand(cmdStats)
|
||||||
f := cmdStats.Flags()
|
f := cmdStats.Flags()
|
||||||
f.StringVar(&statsOptions.countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data")
|
f.StringVar(&statsOptions.countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data")
|
||||||
initMultiSnapshotFilterOptions(f, &statsOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args []string) error {
|
||||||
err := verifyStatsInput(gopts, args)
|
err := verifyStatsInput(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -83,7 +87,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -99,6 +103,10 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opts.countMode == countModeDebug {
|
||||||
|
return statsDebug(ctx, repo)
|
||||||
|
}
|
||||||
|
|
||||||
if !gopts.JSON {
|
if !gopts.JSON {
|
||||||
Printf("scanning...\n")
|
Printf("scanning...\n")
|
||||||
}
|
}
|
||||||
@@ -111,8 +119,8 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
SnapshotsCount: 0,
|
SnapshotsCount: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, statsOptions.Hosts, statsOptions.Tags, statsOptions.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
|
||||||
err = statsWalkSnapshot(ctx, sn, repo, stats)
|
err = statsWalkSnapshot(ctx, sn, repo, opts, stats)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error walking snapshot: %v", err)
|
return fmt.Errorf("error walking snapshot: %v", err)
|
||||||
}
|
}
|
||||||
@@ -122,7 +130,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if statsOptions.countMode == countModeRawData {
|
if opts.countMode == countModeRawData {
|
||||||
// the blob handles have been collected, but not yet counted
|
// the blob handles have been collected, but not yet counted
|
||||||
for blobHandle := range stats.blobs {
|
for blobHandle := range stats.blobs {
|
||||||
pbs := repo.Index().Lookup(blobHandle)
|
pbs := repo.Index().Lookup(blobHandle)
|
||||||
@@ -156,7 +164,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
Printf("Stats in %s mode:\n", statsOptions.countMode)
|
Printf("Stats in %s mode:\n", opts.countMode)
|
||||||
Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
|
Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
|
||||||
if stats.TotalBlobCount > 0 {
|
if stats.TotalBlobCount > 0 {
|
||||||
Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
|
Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
|
||||||
@@ -181,21 +189,21 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {
|
func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
|
||||||
if snapshot.Tree == nil {
|
if snapshot.Tree == nil {
|
||||||
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
|
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
|
||||||
}
|
}
|
||||||
|
|
||||||
stats.SnapshotsCount++
|
stats.SnapshotsCount++
|
||||||
|
|
||||||
if statsOptions.countMode == countModeRawData {
|
if opts.countMode == countModeRawData {
|
||||||
// count just the sizes of unique blobs; we don't need to walk the tree
|
// count just the sizes of unique blobs; we don't need to walk the tree
|
||||||
// ourselves in this case, since a nifty function does it for us
|
// ourselves in this case, since a nifty function does it for us
|
||||||
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
|
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
uniqueInodes := make(map[uint64]struct{})
|
uniqueInodes := make(map[uint64]struct{})
|
||||||
err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))
|
err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, uniqueInodes))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
|
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
|
||||||
}
|
}
|
||||||
@@ -203,7 +211,7 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
|
func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
|
||||||
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
|
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
|
||||||
if nodeErr != nil {
|
if nodeErr != nil {
|
||||||
return true, nodeErr
|
return true, nodeErr
|
||||||
@@ -212,19 +220,19 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {
|
if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
|
||||||
// only count this file if we haven't visited it before
|
// only count this file if we haven't visited it before
|
||||||
fid := makeFileIDByContents(node)
|
fid := makeFileIDByContents(node)
|
||||||
if _, ok := stats.uniqueFiles[fid]; !ok {
|
if _, ok := stats.uniqueFiles[fid]; !ok {
|
||||||
// mark the file as visited
|
// mark the file as visited
|
||||||
stats.uniqueFiles[fid] = struct{}{}
|
stats.uniqueFiles[fid] = struct{}{}
|
||||||
|
|
||||||
if statsOptions.countMode == countModeUniqueFilesByContents {
|
if opts.countMode == countModeUniqueFilesByContents {
|
||||||
// simply count the size of each unique file (unique by contents only)
|
// simply count the size of each unique file (unique by contents only)
|
||||||
stats.TotalSize += node.Size
|
stats.TotalSize += node.Size
|
||||||
stats.TotalFileCount++
|
stats.TotalFileCount++
|
||||||
}
|
}
|
||||||
if statsOptions.countMode == countModeBlobsPerFile {
|
if opts.countMode == countModeBlobsPerFile {
|
||||||
// count the size of each unique blob reference, which is
|
// count the size of each unique blob reference, which is
|
||||||
// by unique file (unique by contents and file path)
|
// by unique file (unique by contents and file path)
|
||||||
for _, blobID := range node.Content {
|
for _, blobID := range node.Content {
|
||||||
@@ -254,7 +262,7 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if statsOptions.countMode == countModeRestoreSize {
|
if opts.countMode == countModeRestoreSize {
|
||||||
// as this is a file in the snapshot, we can simply count its
|
// as this is a file in the snapshot, we can simply count its
|
||||||
// size without worrying about uniqueness, since duplicate files
|
// size without worrying about uniqueness, since duplicate files
|
||||||
// will still be restored
|
// will still be restored
|
||||||
@@ -284,15 +292,16 @@ func makeFileIDByContents(node *restic.Node) fileID {
|
|||||||
return sha256.Sum256(bb)
|
return sha256.Sum256(bb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func verifyStatsInput(gopts GlobalOptions, args []string) error {
|
func verifyStatsInput(opts StatsOptions) error {
|
||||||
// require a recognized counting mode
|
// require a recognized counting mode
|
||||||
switch statsOptions.countMode {
|
switch opts.countMode {
|
||||||
case countModeRestoreSize:
|
case countModeRestoreSize:
|
||||||
case countModeUniqueFilesByContents:
|
case countModeUniqueFilesByContents:
|
||||||
case countModeBlobsPerFile:
|
case countModeBlobsPerFile:
|
||||||
case countModeRawData:
|
case countModeRawData:
|
||||||
|
case countModeDebug:
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", statsOptions.countMode)
|
return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -335,4 +344,149 @@ const (
|
|||||||
countModeUniqueFilesByContents = "files-by-contents"
|
countModeUniqueFilesByContents = "files-by-contents"
|
||||||
countModeBlobsPerFile = "blobs-per-file"
|
countModeBlobsPerFile = "blobs-per-file"
|
||||||
countModeRawData = "raw-data"
|
countModeRawData = "raw-data"
|
||||||
|
countModeDebug = "debug"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func statsDebug(ctx context.Context, repo restic.Repository) error {
|
||||||
|
Warnf("Collecting size statistics\n\n")
|
||||||
|
for _, t := range []restic.FileType{restic.KeyFile, restic.LockFile, restic.IndexFile, restic.PackFile} {
|
||||||
|
hist, err := statsDebugFileType(ctx, repo, t)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
Warnf("File Type: %v\n%v\n", t, hist)
|
||||||
|
}
|
||||||
|
|
||||||
|
hist := statsDebugBlobs(ctx, repo)
|
||||||
|
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
|
||||||
|
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
|
||||||
|
hist := newSizeHistogram(2 * repository.MaxPackSize)
|
||||||
|
err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
|
||||||
|
hist.Add(uint64(size))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return hist, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
|
||||||
|
var hist [restic.NumBlobTypes]*sizeHistogram
|
||||||
|
for i := 0; i < len(hist); i++ {
|
||||||
|
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
repo.Index().Each(ctx, func(pb restic.PackedBlob) {
|
||||||
|
hist[pb.Type].Add(uint64(pb.Length))
|
||||||
|
})
|
||||||
|
|
||||||
|
return hist
|
||||||
|
}
|
||||||
|
|
||||||
|
type sizeClass struct {
|
||||||
|
lower, upper uint64
|
||||||
|
count int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type sizeHistogram struct {
|
||||||
|
count int64
|
||||||
|
totalSize uint64
|
||||||
|
buckets []sizeClass
|
||||||
|
oversized []uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSizeHistogram(sizeLimit uint64) *sizeHistogram {
|
||||||
|
h := &sizeHistogram{}
|
||||||
|
h.buckets = append(h.buckets, sizeClass{0, 0, 0})
|
||||||
|
|
||||||
|
lowerBound := uint64(1)
|
||||||
|
growthFactor := uint64(10)
|
||||||
|
|
||||||
|
for lowerBound < sizeLimit {
|
||||||
|
upperBound := lowerBound*growthFactor - 1
|
||||||
|
if upperBound > sizeLimit {
|
||||||
|
upperBound = sizeLimit
|
||||||
|
}
|
||||||
|
h.buckets = append(h.buckets, sizeClass{lowerBound, upperBound, 0})
|
||||||
|
lowerBound *= growthFactor
|
||||||
|
}
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *sizeHistogram) Add(size uint64) {
|
||||||
|
s.count++
|
||||||
|
s.totalSize += size
|
||||||
|
|
||||||
|
for i, bucket := range s.buckets {
|
||||||
|
if size >= bucket.lower && size <= bucket.upper {
|
||||||
|
s.buckets[i].count++
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
s.oversized = append(s.oversized, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sizeHistogram) String() string {
|
||||||
|
var out strings.Builder
|
||||||
|
|
||||||
|
out.WriteString(fmt.Sprintf("Count: %d\n", s.count))
|
||||||
|
out.WriteString(fmt.Sprintf("Total Size: %s\n", ui.FormatBytes(s.totalSize)))
|
||||||
|
|
||||||
|
t := table.New()
|
||||||
|
t.AddColumn("Size", "{{.SizeRange}}")
|
||||||
|
t.AddColumn("Count", "{{.Count}}")
|
||||||
|
type line struct {
|
||||||
|
SizeRange string
|
||||||
|
Count int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// only print up to the highest used bucket size
|
||||||
|
lastFilledIdx := 0
|
||||||
|
for i := 0; i < len(s.buckets); i++ {
|
||||||
|
if s.buckets[i].count != 0 {
|
||||||
|
lastFilledIdx = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var lines []line
|
||||||
|
hasStarted := false
|
||||||
|
for i, b := range s.buckets {
|
||||||
|
if i > lastFilledIdx {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.count > 0 {
|
||||||
|
hasStarted = true
|
||||||
|
}
|
||||||
|
if hasStarted {
|
||||||
|
lines = append(lines, line{
|
||||||
|
SizeRange: fmt.Sprintf("%d - %d Byte", b.lower, b.upper),
|
||||||
|
Count: b.count,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
longestRange := 0
|
||||||
|
for _, l := range lines {
|
||||||
|
if longestRange < len(l.SizeRange) {
|
||||||
|
longestRange = len(l.SizeRange)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := range lines {
|
||||||
|
lines[i].SizeRange = strings.Repeat(" ", longestRange-len(lines[i].SizeRange)) + lines[i].SizeRange
|
||||||
|
t.AddRow(lines[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = t.Write(&out)
|
||||||
|
|
||||||
|
if len(s.oversized) > 0 {
|
||||||
|
out.WriteString(fmt.Sprintf("Oversized: %v\n", s.oversized))
|
||||||
|
}
|
||||||
|
return out.String()
|
||||||
|
}
|
||||||
|
62
cmd/restic/cmd_stats_test.go
Normal file
62
cmd/restic/cmd_stats_test.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSizeHistogramNew(t *testing.T) {
|
||||||
|
h := newSizeHistogram(42)
|
||||||
|
|
||||||
|
exp := &sizeHistogram{
|
||||||
|
count: 0,
|
||||||
|
totalSize: 0,
|
||||||
|
buckets: []sizeClass{
|
||||||
|
{0, 0, 0},
|
||||||
|
{1, 9, 0},
|
||||||
|
{10, 42, 0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Equals(t, exp, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSizeHistogramAdd(t *testing.T) {
|
||||||
|
h := newSizeHistogram(42)
|
||||||
|
for i := uint64(0); i < 45; i++ {
|
||||||
|
h.Add(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
exp := &sizeHistogram{
|
||||||
|
count: 45,
|
||||||
|
totalSize: 990,
|
||||||
|
buckets: []sizeClass{
|
||||||
|
{0, 0, 1},
|
||||||
|
{1, 9, 9},
|
||||||
|
{10, 42, 33},
|
||||||
|
},
|
||||||
|
oversized: []uint64{43, 44},
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Equals(t, exp, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSizeHistogramString(t *testing.T) {
|
||||||
|
t.Run("overflow", func(t *testing.T) {
|
||||||
|
h := newSizeHistogram(42)
|
||||||
|
h.Add(8)
|
||||||
|
h.Add(50)
|
||||||
|
|
||||||
|
rtest.Equals(t, "Count: 2\nTotal Size: 58 B\nSize Count\n-----------------\n1 - 9 Byte 1\n-----------------\nOversized: [50]\n", h.String())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("withZero", func(t *testing.T) {
|
||||||
|
h := newSizeHistogram(42)
|
||||||
|
h.Add(0)
|
||||||
|
h.Add(1)
|
||||||
|
h.Add(10)
|
||||||
|
|
||||||
|
rtest.Equals(t, "Count: 3\nTotal Size: 11 B\nSize Count\n-------------------\n 0 - 0 Byte 1\n 1 - 9 Byte 1\n10 - 42 Byte 1\n-------------------\n", h.String())
|
||||||
|
})
|
||||||
|
}
|
@@ -35,7 +35,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
|
|
||||||
// TagOptions bundles all options for the 'tag' command.
|
// TagOptions bundles all options for the 'tag' command.
|
||||||
type TagOptions struct {
|
type TagOptions struct {
|
||||||
snapshotFilterOptions
|
restic.SnapshotFilter
|
||||||
SetTags restic.TagLists
|
SetTags restic.TagLists
|
||||||
AddTags restic.TagLists
|
AddTags restic.TagLists
|
||||||
RemoveTags restic.TagLists
|
RemoveTags restic.TagLists
|
||||||
@@ -50,7 +50,7 @@ func init() {
|
|||||||
tagFlags.Var(&tagOptions.SetTags, "set", "`tags` which will replace the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
tagFlags.Var(&tagOptions.SetTags, "set", "`tags` which will replace the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
||||||
tagFlags.Var(&tagOptions.AddTags, "add", "`tags` which will be added to the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
tagFlags.Var(&tagOptions.AddTags, "add", "`tags` which will be added to the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
||||||
tagFlags.Var(&tagOptions.RemoveTags, "remove", "`tags` which will be removed from the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
tagFlags.Var(&tagOptions.RemoveTags, "remove", "`tags` which will be removed from the existing tags in the format `tag[,tag,...]` (can be given multiple times)")
|
||||||
initMultiSnapshotFilterOptions(tagFlags, &tagOptions.snapshotFilterOptions, true)
|
initMultiSnapshotFilter(tagFlags, &tagOptions.SnapshotFilter, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) {
|
func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) {
|
||||||
@@ -111,7 +111,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
|
|||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
Verbosef("create exclusive lock for repository\n")
|
Verbosef("create exclusive lock for repository\n")
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepoExclusive(ctx, repo)
|
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -119,7 +119,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
|
|||||||
}
|
}
|
||||||
|
|
||||||
changeCnt := 0
|
changeCnt := 0
|
||||||
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, opts.Hosts, opts.Tags, opts.Paths, args) {
|
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
|
||||||
changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten())
|
changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)
|
Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)
|
||||||
|
94
cmd/restic/cmd_tag_integration_test.go
Normal file
94
cmd/restic/cmd_tag_integration_test.go
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
|
||||||
|
rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTag(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a new backup, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 0,
|
||||||
|
"expected no tags, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original == nil,
|
||||||
|
"expected original ID to be nil, got %v", newest.Original)
|
||||||
|
originalID := *newest.ID
|
||||||
|
|
||||||
|
testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
|
||||||
|
"set failed, expected one NL tag, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
|
||||||
|
rtest.Assert(t, *newest.Original == originalID,
|
||||||
|
"expected original ID to be set to the first snapshot id")
|
||||||
|
|
||||||
|
testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
|
||||||
|
"add failed, expected CH,NL tags, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
|
||||||
|
rtest.Assert(t, *newest.Original == originalID,
|
||||||
|
"expected original ID to be set to the first snapshot id")
|
||||||
|
|
||||||
|
testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
|
||||||
|
"remove failed, expected one CH tag, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
|
||||||
|
rtest.Assert(t, *newest.Original == originalID,
|
||||||
|
"expected original ID to be set to the first snapshot id")
|
||||||
|
|
||||||
|
testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
|
||||||
|
testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 0,
|
||||||
|
"expected no tags, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
|
||||||
|
rtest.Assert(t, *newest.Original == originalID,
|
||||||
|
"expected original ID to be set to the first snapshot id")
|
||||||
|
|
||||||
|
// Check special case of removing all tags.
|
||||||
|
testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 0,
|
||||||
|
"expected no tags, got %v", newest.Tags)
|
||||||
|
rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
|
||||||
|
rtest.Assert(t, *newest.Original == originalID,
|
||||||
|
"expected original ID to be set to the first snapshot id")
|
||||||
|
}
|
@@ -7,7 +7,6 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@@ -17,6 +16,7 @@ import (
|
|||||||
"github.com/restic/restic/internal/fs"
|
"github.com/restic/restic/internal/fs"
|
||||||
"github.com/restic/restic/internal/repository"
|
"github.com/restic/restic/internal/repository"
|
||||||
"github.com/restic/restic/internal/textfile"
|
"github.com/restic/restic/internal/textfile"
|
||||||
|
"github.com/restic/restic/internal/ui"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -364,7 +364,7 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
|
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
|
||||||
maxSize, err := parseSizeStr(maxSizeStr)
|
maxSize, err := ui.ParseBytes(maxSizeStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -385,35 +385,6 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSizeStr(sizeStr string) (int64, error) {
|
|
||||||
if sizeStr == "" {
|
|
||||||
return 0, errors.New("expected size, got empty string")
|
|
||||||
}
|
|
||||||
|
|
||||||
numStr := sizeStr[:len(sizeStr)-1]
|
|
||||||
var unit int64 = 1
|
|
||||||
|
|
||||||
switch sizeStr[len(sizeStr)-1] {
|
|
||||||
case 'b', 'B':
|
|
||||||
// use initialized values, do nothing here
|
|
||||||
case 'k', 'K':
|
|
||||||
unit = 1024
|
|
||||||
case 'm', 'M':
|
|
||||||
unit = 1024 * 1024
|
|
||||||
case 'g', 'G':
|
|
||||||
unit = 1024 * 1024 * 1024
|
|
||||||
case 't', 'T':
|
|
||||||
unit = 1024 * 1024 * 1024 * 1024
|
|
||||||
default:
|
|
||||||
numStr = sizeStr
|
|
||||||
}
|
|
||||||
value, err := strconv.ParseInt(numStr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return value * unit, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readExcludePatternsFromFiles reads all exclude files and returns the list of
|
// readExcludePatternsFromFiles reads all exclude files and returns the list of
|
||||||
// exclude patterns. For each line, leading and trailing white space is removed
|
// exclude patterns. For each line, leading and trailing white space is removed
|
||||||
// and comment lines are ignored. For each remaining pattern, environment
|
// and comment lines are ignored. For each remaining pattern, environment
|
||||||
|
@@ -187,54 +187,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseSizeStr(t *testing.T) {
|
|
||||||
sizeStrTests := []struct {
|
|
||||||
in string
|
|
||||||
expected int64
|
|
||||||
}{
|
|
||||||
{"1024", 1024},
|
|
||||||
{"1024b", 1024},
|
|
||||||
{"1024B", 1024},
|
|
||||||
{"1k", 1024},
|
|
||||||
{"100k", 102400},
|
|
||||||
{"100K", 102400},
|
|
||||||
{"10M", 10485760},
|
|
||||||
{"100m", 104857600},
|
|
||||||
{"20G", 21474836480},
|
|
||||||
{"10g", 10737418240},
|
|
||||||
{"2T", 2199023255552},
|
|
||||||
{"2t", 2199023255552},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range sizeStrTests {
|
|
||||||
actual, err := parseSizeStr(tt.in)
|
|
||||||
test.OK(t, err)
|
|
||||||
|
|
||||||
if actual != tt.expected {
|
|
||||||
t.Errorf("parseSizeStr(%s) = %d; expected %d", tt.in, actual, tt.expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseInvalidSizeStr(t *testing.T) {
|
|
||||||
invalidSizes := []string{
|
|
||||||
"",
|
|
||||||
" ",
|
|
||||||
"foobar",
|
|
||||||
"zzz",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range invalidSizes {
|
|
||||||
v, err := parseSizeStr(s)
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("wanted error for invalid value %q, got nil", s)
|
|
||||||
}
|
|
||||||
if v != 0 {
|
|
||||||
t.Errorf("wanted zero for invalid value %q, got: %v", s, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIsExcludedByFileSize is for testing the instance of
|
// TestIsExcludedByFileSize is for testing the instance of
|
||||||
// --exclude-larger-than parameters
|
// --exclude-larger-than parameters
|
||||||
func TestIsExcludedByFileSize(t *testing.T) {
|
func TestIsExcludedByFileSize(t *testing.T) {
|
||||||
|
@@ -8,34 +8,28 @@ import (
|
|||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
type snapshotFilterOptions struct {
|
// initMultiSnapshotFilter is used for commands that work on multiple snapshots
|
||||||
Hosts []string
|
|
||||||
Tags restic.TagLists
|
|
||||||
Paths []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// initMultiSnapshotFilterOptions is used for commands that work on multiple snapshots
|
|
||||||
// MUST be combined with restic.FindFilteredSnapshots or FindFilteredSnapshots
|
// MUST be combined with restic.FindFilteredSnapshots or FindFilteredSnapshots
|
||||||
func initMultiSnapshotFilterOptions(flags *pflag.FlagSet, options *snapshotFilterOptions, addHostShorthand bool) {
|
func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, addHostShorthand bool) {
|
||||||
hostShorthand := "H"
|
hostShorthand := "H"
|
||||||
if !addHostShorthand {
|
if !addHostShorthand {
|
||||||
hostShorthand = ""
|
hostShorthand = ""
|
||||||
}
|
}
|
||||||
flags.StringArrayVarP(&options.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)")
|
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)")
|
||||||
flags.Var(&options.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
|
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
|
||||||
flags.StringArrayVar(&options.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")
|
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")
|
||||||
}
|
}
|
||||||
|
|
||||||
// initSingleSnapshotFilterOptions is used for commands that work on a single snapshot
|
// initSingleSnapshotFilter is used for commands that work on a single snapshot
|
||||||
// MUST be combined with restic.FindFilteredSnapshot
|
// MUST be combined with restic.FindFilteredSnapshot
|
||||||
func initSingleSnapshotFilterOptions(flags *pflag.FlagSet, options *snapshotFilterOptions) {
|
func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) {
|
||||||
flags.StringArrayVarP(&options.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
||||||
flags.Var(&options.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
||||||
flags.StringArrayVar(&options.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
|
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
|
||||||
func FindFilteredSnapshots(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, hosts []string, tags []restic.TagList, paths []string, snapshotIDs []string) <-chan *restic.Snapshot {
|
func FindFilteredSnapshots(ctx context.Context, be restic.Lister, loader restic.LoaderUnpacked, f *restic.SnapshotFilter, snapshotIDs []string) <-chan *restic.Snapshot {
|
||||||
out := make(chan *restic.Snapshot)
|
out := make(chan *restic.Snapshot)
|
||||||
go func() {
|
go func() {
|
||||||
defer close(out)
|
defer close(out)
|
||||||
@@ -45,7 +39,7 @@ func FindFilteredSnapshots(ctx context.Context, be restic.Lister, loader restic.
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = restic.FindFilteredSnapshots(ctx, be, loader, hosts, tags, paths, snapshotIDs, func(id string, sn *restic.Snapshot, err error) error {
|
err = f.FindAll(ctx, be, loader, snapshotIDs, func(id string, sn *restic.Snapshot, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Warnf("Ignoring %q: %v\n", id, err)
|
Warnf("Ignoring %q: %v\n", id, err)
|
||||||
} else {
|
} else {
|
||||||
|
@@ -5,9 +5,10 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
|
"github.com/restic/restic/internal/ui"
|
||||||
)
|
)
|
||||||
|
|
||||||
func formatNode(path string, n *restic.Node, long bool) string {
|
func formatNode(path string, n *restic.Node, long bool, human bool) string {
|
||||||
if !long {
|
if !long {
|
||||||
return path
|
return path
|
||||||
}
|
}
|
||||||
@@ -15,6 +16,13 @@ func formatNode(path string, n *restic.Node, long bool) string {
|
|||||||
var mode os.FileMode
|
var mode os.FileMode
|
||||||
var target string
|
var target string
|
||||||
|
|
||||||
|
var size string
|
||||||
|
if human {
|
||||||
|
size = ui.FormatBytes(n.Size)
|
||||||
|
} else {
|
||||||
|
size = fmt.Sprintf("%6d", n.Size)
|
||||||
|
}
|
||||||
|
|
||||||
switch n.Type {
|
switch n.Type {
|
||||||
case "file":
|
case "file":
|
||||||
mode = 0
|
mode = 0
|
||||||
@@ -33,8 +41,8 @@ func formatNode(path string, n *restic.Node, long bool) string {
|
|||||||
mode = os.ModeSocket
|
mode = os.ModeSocket
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Sprintf("%s %5d %5d %6d %s %s%s",
|
return fmt.Sprintf("%s %5d %5d %s %s %s%s",
|
||||||
mode|n.Mode, n.UID, n.GID, n.Size,
|
mode|n.Mode, n.UID, n.GID, size,
|
||||||
n.ModTime.Local().Format(TimeFormat), path,
|
n.ModTime.Local().Format(TimeFormat), path,
|
||||||
target)
|
target)
|
||||||
}
|
}
|
||||||
|
61
cmd/restic/format_test.go
Normal file
61
cmd/restic/format_test.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFormatNode(t *testing.T) {
|
||||||
|
// overwrite time zone to ensure the data is formatted reproducibly
|
||||||
|
tz := time.Local
|
||||||
|
time.Local = time.UTC
|
||||||
|
defer func() {
|
||||||
|
time.Local = tz
|
||||||
|
}()
|
||||||
|
|
||||||
|
testPath := "/test/path"
|
||||||
|
node := restic.Node{
|
||||||
|
Name: "baz",
|
||||||
|
Type: "file",
|
||||||
|
Size: 14680064,
|
||||||
|
UID: 1000,
|
||||||
|
GID: 2000,
|
||||||
|
ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range []struct {
|
||||||
|
path string
|
||||||
|
restic.Node
|
||||||
|
long bool
|
||||||
|
human bool
|
||||||
|
expect string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
path: testPath,
|
||||||
|
Node: node,
|
||||||
|
long: false,
|
||||||
|
human: false,
|
||||||
|
expect: testPath,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: testPath,
|
||||||
|
Node: node,
|
||||||
|
long: true,
|
||||||
|
human: false,
|
||||||
|
expect: "---------- 1000 2000 14680064 2020-01-02 03:04:05 " + testPath,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: testPath,
|
||||||
|
Node: node,
|
||||||
|
long: true,
|
||||||
|
human: true,
|
||||||
|
expect: "---------- 1000 2000 14.000 MiB 2020-01-02 03:04:05 " + testPath,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
r := formatNode(c.path, &c.Node, c.long, c.human)
|
||||||
|
rtest.Equals(t, c.expect, r)
|
||||||
|
}
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user