Mirror of https://github.com/restic/restic.git, synced 2025-08-25 19:57:35 +00:00
Compare commits
673 Commits
Commit range: fe9f142b52 (newest) … febb32b5b4 (oldest)
@@ -1,12 +0,0 @@
-# Folders
-.git/
-.github/
-changelog/
-doc/
-docker/
-helpers/
-
-# Files
-.gitignore
-.golangci.yml
-*.md
.github/ISSUE_TEMPLATE/Bug.md (35 lines changed)
@@ -32,23 +32,30 @@ Output of `restic version`
 --------------------------
 
 
 
-How did you run restic exactly?
--------------------------------
+What backend/service did you use to store the repository?
+---------------------------------------------------------
+
+
+Problem description / Steps to reproduce
+----------------------------------------
 
 <!--
 This section should include at least:
 
+* A description of the problem you are having with restic.
+
 * The complete command line and any environment variables you used to
   configure restic's backend access. Make sure to replace sensitive values!
 
 * The output of the commands, what restic prints gives may give us much
   information to diagnose the problem!
+
+* The more time you spend describing an easy way to reproduce the behavior (if
+  this is possible), the easier it is for the project developers to fix it!
 -->
 
-What backend/server/service did you use to store the repository?
-----------------------------------------------------------------
 
 
 Expected behavior
 -----------------
@@ -65,22 +72,12 @@ In this section, please try to concentrate on observations, so only describe
 what you observed directly.
 -->
 
-Steps to reproduce the behavior
--------------------------------
-
-<!--
-The more time you spend describing an easy way to reproduce the behavior (if
-this is possible), the easier it is for the project developers to fix it!
--->
-
-
 Do you have any idea what may have caused this?
 -----------------------------------------------
 
-
-
-
-Do you have an idea how to solve the issue?
--------------------------------------------
-
+<!--
+Did something noteworthy happen on your system, Internet connection, backend services, etc?
+Do you have an idea how to solve the issue?
+-->
 
 Did restic help you today? Did it make you happy in any way?
.github/dependabot.yml (4 lines changed)
@@ -4,10 +4,10 @@ updates:
   - package-ecosystem: "gomod"
     directory: "/" # Location of package manifests
     schedule:
-      interval: "weekly"
+      interval: "monthly"
 
   # Dependencies listed in .github/workflows/*.yml
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
-      interval: "weekly"
+      interval: "monthly"
.github/workflows/docker.yml (new file, 66 lines)

name: Create and publish a Docker image

on:
  push:
    tags:
      - 'v*'
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    if: github.repository == 'restic/restic'
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to the Container registry
        uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226

      - name: Ensure consistent binaries
        run: |
          echo "removing git directory for consistency with release binaries"
          rm -rf .git
        # remove VCS information from release builds, keep VCS for nightly builds on master
        if: github.ref != 'refs/heads/master'

      - name: Build and push Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          push: true
          context: .
          file: docker/Dockerfile.release
          platforms: linux/386,linux/amd64,linux/arm,linux/arm64
          pull: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/tests.yml (103 lines changed)
@@ -7,9 +7,13 @@ on:
 
   # run tests for all pull requests
   pull_request:
+  merge_group:
+
+permissions:
+  contents: read
 
 env:
-  latest_go: "1.19.x"
+  latest_go: "1.21.x"
   GO111MODULE: on
 
 jobs:
@@ -19,29 +23,34 @@ jobs:
         # list of jobs to run:
         include:
           - job_name: Windows
-            go: 1.19.x
+            go: 1.21.x
             os: windows-latest
 
           - job_name: macOS
-            go: 1.19.x
+            go: 1.21.x
             os: macOS-latest
             test_fuse: false
 
           - job_name: Linux
-            go: 1.19.x
+            go: 1.21.x
             os: ubuntu-latest
             test_cloud_backends: true
             test_fuse: true
             check_changelog: true
 
           - job_name: Linux (race)
-            go: 1.19.x
+            go: 1.21.x
             os: ubuntu-latest
             test_fuse: true
             test_opts: "-race"
 
           - job_name: Linux
-            go: 1.18.x
+            go: 1.20.x
+            os: ubuntu-latest
+            test_fuse: true
+
+          - job_name: Linux
+            go: 1.19.x
             os: ubuntu-latest
             test_fuse: true
 
@@ -53,7 +62,7 @@ jobs:
 
     steps:
       - name: Set up Go ${{ matrix.go }}
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
 
@@ -126,12 +135,20 @@ jobs:
        if: matrix.os == 'windows-latest'
 
      - name: Check out code
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
 
      - name: Build with build.go
        run: |
          go run build.go
 
+     - name: Minimal test
+       run: |
+         ./restic init
+         ./restic backup .
+       env:
+         RESTIC_REPOSITORY: ../testrepo
+         RESTIC_PASSWORD: password
+
      - name: Run local Tests
        env:
          RESTIC_TEST_FUSE: ${{ matrix.test_fuse }}
@@ -179,7 +196,7 @@ jobs:
      # own repo, otherwise the secrets are not available
      # Skip for Dependabot pull requests as these are run without secrets
      # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#responding-to-events
-     if: (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
+     if: ((github.repository == 'restic/restic' && github.event_name == 'push') || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
 
      - name: Check changelog files with calens
        run: |
@@ -193,69 +210,52 @@ jobs:
   cross_compile:
     strategy:
-      # ATTENTION: the list of architectures must be in sync with helpers/build-release-binaries/main.go!
       matrix:
         # run cross-compile in three batches parallel so the overall tests run faster
-        targets:
-          - "linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64le linux/mips linux/mipsle linux/mips64 linux/mips64le linux/riscv64 linux/s390x"
-
-          - "openbsd/386 openbsd/amd64 \
-            freebsd/386 freebsd/amd64 freebsd/arm \
-            aix/ppc64 \
-            darwin/amd64 darwin/arm64"
-
-          - "netbsd/386 netbsd/amd64 \
-            windows/386 windows/amd64 \
-            solaris/amd64"
+        subset:
+          - "0/3"
+          - "1/3"
+          - "2/3"
 
     env:
       GOPROXY: https://proxy.golang.org
 
     runs-on: ubuntu-latest
 
-    name: Cross Compile for ${{ matrix.targets }}
+    name: Cross Compile for subset ${{ matrix.subset }}
 
     steps:
       - name: Set up Go ${{ env.latest_go }}
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.latest_go }}
 
-      - name: Install gox
-        run: |
-          go install github.com/mitchellh/gox@latest
-
       - name: Check out code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
-      - name: Cross-compile with gox for ${{ matrix.targets }}
-        env:
-          GOFLAGS: "-trimpath"
-          GOX_ARCHS: "${{ matrix.targets }}"
+      - name: Cross-compile for subset ${{ matrix.subset }}
        run: |
-          mkdir build-output
-          gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}" ./cmd/restic
-          gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -tags debug -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}_debug" ./cmd/restic
+          mkdir build-output build-output-debug
+          go run ./helpers/build-release-binaries/main.go -o build-output -s . --platform-subset ${{ matrix.subset }}
+          go run ./helpers/build-release-binaries/main.go -o build-output-debug -s . --platform-subset ${{ matrix.subset }} --tags debug
 
   lint:
     name: lint
     runs-on: ubuntu-latest
     steps:
       - name: Set up Go ${{ env.latest_go }}
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.latest_go }}
 
       - name: Check out code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.49
-          # Optional: show only new issues if it's a pull request. The default value is `false`.
-          only-new-issues: true
+          version: v1.55.2
          args: --verbose --timeout 5m
 
       # only run golangci-lint for pull requests, otherwise ALL hints get
@@ -269,12 +269,27 @@ jobs:
        go mod tidy
        git diff --exit-code go.mod go.sum
 
+  analyze:
+    name: Analyze results
+    needs: [test, cross_compile, lint]
+    if: always()
+
+    permissions: # no need to access code
+      contents: none
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Decide whether the needed jobs succeeded or failed
+        uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe
+        with:
+          jobs: ${{ toJSON(needs) }}
+
   docker:
     name: docker
     runs-on: ubuntu-latest
     steps:
       - name: Check out code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Docker meta
         id: meta
@@ -294,14 +309,14 @@ jobs:
            type=sha
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
 
       - name: Build and push
         id: docker_build
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
        with:
          push: false
          context: .
.gitignore (1 line changed)
@@ -1,3 +1,4 @@
 /restic
+/restic.exe
 /.vagrant
 /.vscode
@@ -10,13 +10,10 @@ linters:
   # make sure all errors returned by functions are handled
   - errcheck
 
-  # find unused code
-  - deadcode
-
   # show how code can be simplified
   - gosimple
 
-  # # make sure code is formatted
+  # make sure code is formatted
   - gofmt
 
   # examine code and report suspicious constructs, such as Printf calls whose
@@ -35,12 +32,6 @@ linters:
   # find unused variables, functions, structs, types, etc.
   - unused
 
-  # find unused struct fields
-  - structcheck
-
-  # find unused global variables
-  - varcheck
-
   # parse and typecheck code
   - typecheck
 
@@ -57,3 +48,6 @@ issues:
     - don't use ALL_CAPS in Go names; use CamelCase
     # revive: lots of packages don't have such a comment
     - "package-comments: should have a package comment"
+    # staticcheck: there's no easy way to replace these packages
+    - "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated"
+    - "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated"
.readthedocs.yaml (new file, 18 lines)

# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: doc/conf.py

# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: doc/requirements.txt
CHANGELOG.md (934 lines changed; diff suppressed because it is too large)
@@ -58,6 +58,19 @@ Please be aware that the debug log file will contain potentially sensitive
 things like file and directory names, so please either redact it before
 uploading it somewhere or post only the parts that are really relevant.
 
+If restic gets stuck, please also include a stacktrace in the description.
+On non-Windows systems, you can send a SIGQUIT signal to restic or press
+`Ctrl-\` to achieve the same result. This causes restic to print a stacktrace
+and then exit immediatelly. This will not damage your repository, however,
+it might be necessary to manually clean up stale lock files using
+`restic unlock`.
+
+On Windows, please set the environment variable `RESTIC_DEBUG_STACKTRACE_SIGINT`
+to `true` and press `Ctrl-C` to create a stacktrace.
+
+If you think restic uses too much memory or a too large cache directory, then
+please include the output of `restic stats --mode debug`.
+
 
 Development Environment
 =======================
@@ -78,10 +91,40 @@ Then use the `go` tool to build restic:
     $ ./restic version
     restic 0.14.0-dev (compiled manually) compiled with go1.19 on linux/amd64
 
+To create a debug build use:
+
+    $ go build -tags debug ./cmd/restic
+
 You can run all tests with the following command:
 
     $ go test ./...
 
+
+Performance and Memory Usage Issues
+===================================
+
+Debug builds of restic support the `--block-profile`, `--cpu-profile`,
+`--mem-profile`, and `--trace-profile` options which collect performance data
+that later on can be analyzed using the go tools:
+
+    $ restic --cpu-profile . [...]
+    $ go tool pprof -http localhost:12345 cpu.pprof
+
+To analyze a trace profile use `go tool trace -http=localhost:12345 trace.out`.
+
+As the memory usage of restic changes over time, it may be useful to capture a
+snapshot of the current heap. This is possible using then `--listen-profile`
+option. Then while restic runs you can query and afterwards analyze the heap statistics.
+
+    $ restic --listen-profile localhost:12345 [...]
+    $ curl http://localhost:12345/debug/pprof/heap -o heap.pprof
+    $ go tool pprof -http localhost:12345 heap.pprof
+
+Further useful tools are setting the environment variable `GODEBUG=gctrace=1`,
+which provides information about garbage collector runs. For a graphical variant
+combine this with gcvis.
+
+
 Providing Patches
 =================
 
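As a rough illustration of the SIGQUIT approach described in the diff above (assuming a single running restic process on a Linux shell; `pidof` is a system tool, not part of restic):

    $ kill -QUIT $(pidof restic)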
@@ -95,7 +95,7 @@ release. Instructions on how to do that are contained in the
 News
 ----
 
-You can follow the restic project on Twitter [@resticbackup](https://twitter.com/resticbackup) or by subscribing to
+You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to
 the [project blog](https://restic.net/blog/).
 
 License
build.go (6 lines changed)
@@ -380,6 +380,12 @@ func main() {
     }
   }
 
+  solarisMinVersion := GoVersion{Major: 1, Minor: 20, Patch: 0}
+  if env["GOARCH"] == "solaris" && !goVersion.AtLeast(solarisMinVersion) {
+    fmt.Fprintf(os.Stderr, "Detected version %s is too old, restic requires at least %s for Solaris\n", goVersion, solarisMinVersion)
+    os.Exit(1)
+  }
+
   verbosePrintf("detected Go version %v\n", goVersion)
 
   preserveSymbols := false
@@ -3,7 +3,7 @@ Enhancement: Allow limiting IO concurrency for local and SFTP backend
 Restic did not support limiting the IO concurrency / number of connections for
 accessing repositories stored using the local or SFTP backends. The number of
 connections is now limited as for other backends, and can be configured via the
-the `-o local.connections=2` and `-o sftp.connections=5` options. This ensures
-that restic does not overwhelm the backend with concurrent IO operations.
+`-o local.connections=2` and `-o sftp.connections=5` options. This ensures that
+restic does not overwhelm the backend with concurrent IO operations.
 
 https://github.com/restic/restic/pull/3475
changelog/0.16.0_2023-07-31/issue-1495 (new file, 7 lines)

Enhancement: Sort snapshots by timestamp in `restic find`

The `find` command used to print snapshots in an arbitrary order. Restic now
prints snapshots sorted by timestamp.

https://github.com/restic/restic/issues/1495
https://github.com/restic/restic/pull/4409
changelog/0.16.0_2023-07-31/issue-1759 (new file, 21 lines)

Enhancement: Add `repair index` and `repair snapshots` commands

The `rebuild-index` command has been renamed to `repair index`. The old name
will still work, but is deprecated.

When a snapshot was damaged, the only option up to now was to completely forget
the snapshot, even if only some unimportant files in it were damaged and other
files were still fine.

Restic now has a `repair snapshots` command, which can salvage any non-damaged
files and parts of files in the snapshots by removing damaged directories and
missing file contents. Please note that the damaged data may still be lost
and see the "Troubleshooting" section in the documentation for more details.

https://github.com/restic/restic/issues/1759
https://github.com/restic/restic/issues/1714
https://github.com/restic/restic/issues/1798
https://github.com/restic/restic/issues/2334
https://github.com/restic/restic/pull/2876
https://forum.restic.net/t/corrupted-repo-how-to-repair/799
https://forum.restic.net/t/recovery-options-for-damaged-repositories/1571
changelog/0.16.0_2023-07-31/issue-1926 (new file, 8 lines)

Enhancement: Allow certificate paths to be passed through environment variables

Restic will now read paths to certificates from the environment variables
`RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert`
are not specified.

https://github.com/restic/restic/issues/1926
https://github.com/restic/restic/pull/4384
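Illustrative usage of the new variables (a minimal sketch; the certificate paths and repository URL are placeholders, not taken from the entry):

    $ export RESTIC_CACERT=/path/to/ca.pem
    $ export RESTIC_TLS_CLIENT_CERT=/path/to/client.pem
    $ restic -r rest:https://backup.example.com/repo snapshots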
changelog/0.16.0_2023-07-31/issue-2359 (new file, 11 lines)

Enhancement: Provide multi-platform Docker images

The official Docker images are now built for the architectures linux/386,
linux/amd64, linux/arm and linux/arm64.

As an alternative to the Docker Hub, the Docker images are also
available on ghcr.io, the GitHub Container Registry.

https://github.com/restic/restic/issues/2359
https://github.com/restic/restic/issues/4269
https://github.com/restic/restic/pull/4364
changelog/0.16.0_2023-07-31/issue-2468 (new file, 10 lines)

Enhancement: Add support for non-global Azure clouds

The `azure` backend previously only supported storages using the global domain
`core.windows.net`. This meant that backups to other domains such as Azure
China (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were
not supported. Restic now allows overriding the global domain using the
environment variable `AZURE_ENDPOINT_SUFFIX`.

https://github.com/restic/restic/issues/2468
https://github.com/restic/restic/pull/4387
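A sketch of pointing restic at Azure China using the new variable, assuming the usual Azure account credentials are already set in the environment (the container name is a placeholder):

    $ export AZURE_ENDPOINT_SUFFIX=core.chinacloudapi.cn
    $ restic -r azure:mycontainer:/ snapshots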
changelog/0.16.0_2023-07-31/issue-2565 (new file, 10 lines)

Bugfix: Support "unlimited" in `forget --keep-*` options

Restic would previously forget snapshots that should have been kept when a
negative value was passed to the `--keep-*` options. Negative values are now
forbidden. To keep all snapshots, the special value `unlimited` is now
supported. For example, `--keep-monthly unlimited` will keep all monthly
snapshots.

https://github.com/restic/restic/issues/2565
https://github.com/restic/restic/pull/4234
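For example, a policy that keeps two weeks of daily snapshots but never expires monthly ones could look like this (the retention numbers are illustrative):

    $ restic forget --keep-daily 14 --keep-monthly unlimited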
changelog/0.16.0_2023-07-31/issue-3311 (new file, 12 lines)

Bugfix: Support non-UTF8 paths as symlink target

Earlier restic versions did not correctly `backup` and `restore` symlinks that
contain a non-UTF8 target. Note that this only affected systems that still use
a non-Unicode encoding for filesystem paths.

The repository format is now extended to add support for such symlinks. Please
note that snapshots must have been created with at least restic version 0.16.0
for `restore` to correctly handle non-UTF8 symlink targets when restoring them.

https://github.com/restic/restic/issues/3311
https://github.com/restic/restic/pull/3802
changelog/0.16.0_2023-07-31/issue-3328 (new file, 9 lines)

Enhancement: Reduce memory usage by up to 25%

The in-memory index has been optimized to be more garbage collection friendly.
Restic now defaults to `GOGC=50` to run the Go garbage collector more
frequently.

https://github.com/restic/restic/issues/3328
https://github.com/restic/restic/pull/4352
https://github.com/restic/restic/pull/4353
changelog/0.16.0_2023-07-31/issue-3397 (new file, 11 lines)

Enhancement: Improve accuracy of ETA displayed during backup

Restic's `backup` command displayed an ETA that did not adapt when the rate of
progress made during the backup changed during the course of the backup.

Restic now uses recent progress when computing the ETA. It is important to
realize that the estimate may still be wrong, because restic cannot predict
the future, but the hope is that the ETA will be more accurate in most cases.

https://github.com/restic/restic/issues/3397
https://github.com/restic/restic/pull/3563
changelog/0.16.0_2023-07-31/issue-3624 (new file, 9 lines)

Enhancement: Keep oldest snapshot when there are not enough snapshots

The `forget` command now additionally preserves the oldest snapshot if fewer
snapshots than allowed by the `--keep-*` parameters would otherwise be kept.
This maximizes the amount of history kept within the specified limits.

https://github.com/restic/restic/issues/3624
https://github.com/restic/restic/pull/4366
https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
changelog/0.16.0_2023-07-31/issue-3698 (new file, 8 lines)

Enhancement: Add support for Managed / Workload Identity to `azure` backend

Restic now additionally supports authenticating to Azure using Workload
Identity or Managed Identity credentials, which are automatically injected in
several environments such as a managed Kubernetes cluster.

https://github.com/restic/restic/issues/3698
https://github.com/restic/restic/pull/4029
changelog/0.16.0_2023-07-31/issue-3871 (new file, 22 lines)

Enhancement: Support `<snapshot>:<subfolder>` syntax to select subfolders

Commands like `diff` or `restore` always worked with the full snapshot. This
did not allow comparing only a specific subfolder or only restoring that folder
(`restore --include subfolder` filters the restored files, but still creates the
directories included in `subfolder`).

The commands `diff`, `dump`, `ls` and `restore` now support the
`<snapshot>:<subfolder>` syntax, where `snapshot` is the ID of a snapshot (or
the string `latest`) and `subfolder` is a path within the snapshot. The
commands will then only work with the specified path of the snapshot. The
`subfolder` must be a path to a folder as returned by `ls`. Two examples:

`restic restore -t target latest:/some/path`
`restic diff 12345678:/some/path 90abcef:/some/path`

For debugging purposes, the `cat` command now supports `cat tree
<snapshot>:<subfolder>` to return the directory metadata for the given
subfolder.

https://github.com/restic/restic/issues/3871
https://github.com/restic/restic/pull/4334
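A further sketch combining the new syntax with `ls` (the subfolder path is a placeholder):

    $ restic ls -l latest:/home/user/work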
changelog/0.16.0_2023-07-31/issue-3941 (new file, 17 lines)

Enhancement: Support `--group-by` for backup parent selection

Previously, the `backup` command by default selected the parent snapshot based
on the hostname and the backup targets. When the backup path list changed, the
`backup` command was unable to determine a suitable parent snapshot and had to
read all files again.

The new `--group-by` option for the `backup` command allows filtering snapshots
for the parent selection by `host`, `paths` and `tags`. It defaults to
`host,paths` which selects the latest snapshot with hostname and paths matching
those of the backup run. This matches the behavior of prior restic versions.

The new `--group-by` option should be set to the same value as passed to
`forget --group-by`.

https://github.com/restic/restic/issues/3941
https://github.com/restic/restic/pull/4081
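A sketch of keeping `backup` and `forget` grouped the same way (paths and retention policy are placeholders):

    $ restic backup --group-by host,tags /home /etc
    $ restic forget --group-by host,tags --keep-daily 7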
changelog/0.16.0_2023-07-31/issue-4130 (new file, 9 lines)

Enhancement: Cancel current command if cache becomes unusable

If the cache directory was removed or ran out of space while restic was
running, this would previously cause further caching attempts to fail and
thereby drastically slow down the command execution. Now, the currently running
command is instead canceled.

https://github.com/restic/restic/issues/4130
https://github.com/restic/restic/pull/4166
changelog/0.16.0_2023-07-31/issue-4159 (new file, 12 lines)

Enhancement: Add `--human-readable` option to `ls` and `find` commands

Previously, when using the `-l` option with the `ls` and `find` commands, the
displayed size was always in bytes, without an option for a more human readable
format such as MiB or GiB.

The new `--human-readable` option will convert longer size values into more
human friendly values with an appropriate suffix depending on the output size.
For example, a size of `14680064` will be shown as `14.000 MiB`.

https://github.com/restic/restic/issues/4159
https://github.com/restic/restic/pull/4351
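Illustrative usage of the new option (snapshot selection and search pattern are placeholders):

    $ restic ls -l --human-readable latest
    $ restic find -l --human-readable '*.iso'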
changelog/0.16.0_2023-07-31/issue-4188 (new file, 8 lines)

Enhancement: Include restic version in snapshot metadata

The restic version used to backup a snapshot is now included in its metadata
and shown when inspecting a snapshot using `restic cat snapshot <snapshotID>`
or `restic snapshots --json`.

https://github.com/restic/restic/issues/4188
https://github.com/restic/restic/pull/4378
changelog/0.16.0_2023-07-31/issue-4199 (new file, 9 lines)

Bugfix: Avoid lock refresh issues on slow network connections

On network connections with a low upload speed, backups and other operations
could fail with the error message `Fatal: failed to refresh lock in time`.

This has now been fixed by reworking the lock refresh handling.

https://github.com/restic/restic/issues/4199
https://github.com/restic/restic/pull/4304
changelog/0.16.0_2023-07-31/issue-426 (new file, 14 lines)

Enhancement: Show progress bar during restore

The `restore` command now shows a progress report while restoring files.

Example: `[0:42] 5.76%  23 files 12.98 MiB, total 3456 files 23.54 GiB`

JSON output is now also supported.

https://github.com/restic/restic/issues/426
https://github.com/restic/restic/issues/3413
https://github.com/restic/restic/issues/3627
https://github.com/restic/restic/pull/3991
https://github.com/restic/restic/pull/4314
https://forum.restic.net/t/progress-bar-for-restore/5210
changelog/0.16.0_2023-07-31/issue-4274 (new file, 11 lines)

Bugfix: Improve lock refresh handling after standby

If the restic process was stopped or the host running restic entered standby
during a long running operation such as a backup, this previously resulted in
the operation failing with `Fatal: failed to refresh lock in time`.

This has now been fixed such that restic first checks whether it is safe to
continue the current operation and only throws an error if not.

https://github.com/restic/restic/issues/4274
https://github.com/restic/restic/pull/4374
changelog/0.16.0_2023-07-31/issue-4375 (new file, 8 lines)

Enhancement: Add support for extended attributes on symlinks

Restic now supports extended attributes on symlinks when backing up,
restoring, or FUSE-mounting snapshots. This includes, for example, the
`security.selinux` xattr on Linux distributions that use SELinux.

https://github.com/restic/restic/issues/4375
https://github.com/restic/restic/pull/4379
changelog/0.16.0_2023-07-31/issue-719 (new file, 8 lines)

Enhancement: Add `--retry-lock` option

This option allows specifying a duration for which restic will wait if the
repository is already locked.

https://github.com/restic/restic/issues/719
https://github.com/restic/restic/pull/2214
https://github.com/restic/restic/pull/4107
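A sketch of waiting up to five minutes for another process to release the repository lock (duration and path are illustrative):

    $ restic backup --retry-lock 5m /home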
changelog/0.16.0_2023-07-31/pull-3261 (new file, 8 lines)

Enhancement: Reduce file fragmentation for local backend

Before this change, local backend files could become fragmented.
Now restic will try to preallocate space for pack files to avoid
their fragmentation.

https://github.com/restic/restic/issues/2679
https://github.com/restic/restic/pull/3261
changelog/0.16.0_2023-07-31/pull-4176 (new file, 7 lines)

Change: Fix JSON message type of `scan_finished` for the `backup` command

Restic incorrectly set the `message_type` of the `scan_finished` message to
`status` instead of `verbose_status`. This has now been corrected so that
the messages report the correct type.

https://github.com/restic/restic/pull/4176
7  changelog/0.16.0_2023-07-31/pull-4201  Normal file
@@ -0,0 +1,7 @@
Change: Require Go 1.20 for Solaris builds

Building restic on Solaris now requires Go 1.20, as the library used to access
Azure uses the mmap syscall, which is only available on Solaris starting from
Go 1.20. All other platforms, however, continue to build with Go 1.18.

https://github.com/restic/restic/pull/4201
6  changelog/0.16.0_2023-07-31/pull-4220  Normal file
@@ -0,0 +1,6 @@
Enhancement: Add `jq` binary to Docker image

The Docker image now contains `jq`, which can be useful to process JSON data
output by restic.

https://github.com/restic/restic/pull/4220
7  changelog/0.16.0_2023-07-31/pull-4226  Normal file
@@ -0,0 +1,7 @@
Enhancement: Allow specifying region of new buckets in the `gs` backend

Previously, buckets used by the Google Cloud Storage backend would always get
created in the "us" region. It is now possible to specify the region where a
bucket should be created by using the `-o gs.region=us` option.

https://github.com/restic/restic/pull/4226
8  changelog/0.16.0_2023-07-31/pull-4318  Normal file
@@ -0,0 +1,8 @@
Bugfix: Correctly clean up status bar output of the `backup` command

Due to a regression in restic 0.15.2, the status bar of the `backup` command
could leave some output behind. This happened if filenames were printed that
were wider than the current terminal width. This has now been fixed.

https://github.com/restic/restic/issues/4319
https://github.com/restic/restic/pull/4318
3  changelog/0.16.0_2023-07-31/pull-4333  Normal file
@@ -0,0 +1,3 @@
Bugfix: `generate` and `init` no longer silently ignore unexpected arguments

https://github.com/restic/restic/pull/4333
8  changelog/0.16.0_2023-07-31/pull-4400  Normal file
@@ -0,0 +1,8 @@
Bugfix: Ignore missing folders in `rest` backend

If a repository accessed via the REST backend was missing folders, then restic
would fail with an error while trying to list the data in the repository. This
has now been fixed.

https://github.com/restic/restic/pull/4400
https://github.com/restic/rest-server/issues/235
9  changelog/0.16.1_2023-10-24/issue-4128  Normal file
@@ -0,0 +1,9 @@
Enhancement: Automatically set `GOMAXPROCS` in resource-constrained containers

When running restic in a Linux container with CPU-usage limits, restic now
automatically adjusts `GOMAXPROCS`. This helps to reduce the memory consumption
on hosts with many CPU cores.

https://github.com/restic/restic/issues/4128
https://github.com/restic/restic/pull/4485
https://github.com/restic/restic/pull/4531
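The underlying idea is to derive the effective CPU limit from the container's cgroup and cap `GOMAXPROCS` accordingly. The sketch below reads the cgroup v2 `cpu.max` file by hand purely for illustration; restic presumably relies on a dedicated helper for this, and real code also has to handle cgroup v1 and the absence of a limit.

    package main

    import (
    	"fmt"
    	"math"
    	"os"
    	"runtime"
    	"strconv"
    	"strings"
    )

    // cpuQuota returns the number of CPUs allowed by the cgroup v2 limit,
    // or 0 if no limit is set or the file cannot be read.
    func cpuQuota() float64 {
    	data, err := os.ReadFile("/sys/fs/cgroup/cpu.max") // e.g. "150000 100000" or "max 100000"
    	if err != nil {
    		return 0
    	}
    	fields := strings.Fields(string(data))
    	if len(fields) != 2 || fields[0] == "max" {
    		return 0
    	}
    	quota, err1 := strconv.ParseFloat(fields[0], 64)
    	period, err2 := strconv.ParseFloat(fields[1], 64)
    	if err1 != nil || err2 != nil || period == 0 {
    		return 0
    	}
    	return quota / period
    }

    func main() {
    	if q := cpuQuota(); q > 0 {
    		runtime.GOMAXPROCS(int(math.Max(1, math.Floor(q))))
    	}
    	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
    }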
8  changelog/0.16.1_2023-10-24/issue-4513  Normal file
@@ -0,0 +1,8 @@
Bugfix: Make `key list` command honor `--no-lock`

The `key list` command now supports the `--no-lock` option. This allows
listing the keys that can access a repository without needing write access
(e.g., read-only sftp access, filesystem snapshot).

https://github.com/restic/restic/issues/4513
https://github.com/restic/restic/pull/4514
8  changelog/0.16.1_2023-10-24/issue-4516  Normal file
@@ -0,0 +1,8 @@
Bugfix: Do not try to load password on command line autocomplete

The command line autocompletion previously tried to load the repository
password, which could cause the autocompletion not to work. This step is now
skipped.

https://github.com/restic/restic/issues/4516
https://github.com/restic/restic/pull/4526
22  changelog/0.16.1_2023-10-24/issue-4523  Normal file
@@ -0,0 +1,22 @@
Bugfix: Update zstd library to fix possible data corruption at max. compression

In restic 0.16.0, backups where the compression level was set to `max` (using
`--compression max`) could in rare and very specific circumstances result in
data corruption due to a bug in the library used for compressing data.

Restic now uses the latest version of the library used to compress data, which
includes a fix for this issue. Please note that the `auto` compression level
(which restic uses by default) was never affected, and even if you used `max`
compression, chances of being affected by this issue were very small.

To check a repository for any corruption, run `restic check --read-data`. This
will download and verify the whole repository and can be used at any time to
completely verify the integrity of a repository. If the `check` command detects
anomalies, follow the suggested steps.

To simplify any needed repository repair and minimize data loss, there is also
a new and experimental `repair packs` command that salvages all valid data from
the affected pack files (see `restic help repair packs` for more information).

https://github.com/restic/restic/issues/4523
https://github.com/restic/restic/pull/4530
7  changelog/0.16.1_2023-10-24/pull-299  Normal file
@@ -0,0 +1,7 @@
Enhancement: Show progress bar while loading the index

Restic did not provide any feedback while loading index files. Now, a
progress bar shows the index loading progress.

https://github.com/restic/restic/issues/229
https://github.com/restic/restic/pull/4419
11  changelog/0.16.1_2023-10-24/pull-4480  Normal file
@@ -0,0 +1,11 @@
Enhancement: Allow setting REST password and username via environment variables

Previously, it was only possible to specify the REST-server username and
password in the repository URL, or by using the `--repository-file` option.
This meant it was not possible to use authentication in contexts where the
repository URL is stored in a publicly accessible way.

Restic now allows setting the username and password using the
`RESTIC_REST_USERNAME` and `RESTIC_REST_PASSWORD` environment variables.

https://github.com/restic/restic/pull/4480
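A resolver for such credentials usually prefers explicit values in the URL and falls back to the environment. The sketch below shows that pattern; the precedence order and the example URL are assumptions for illustration, not a statement about how restic resolves the values internally.

    package main

    import (
    	"fmt"
    	"net/url"
    	"os"
    )

    // restCredentials returns the username and password for a REST repository URL,
    // falling back to RESTIC_REST_USERNAME / RESTIC_REST_PASSWORD when the URL
    // carries no user info. The precedence shown here is illustrative.
    func restCredentials(repo string) (user, pass string, err error) {
    	u, err := url.Parse(repo)
    	if err != nil {
    		return "", "", err
    	}
    	if u.User != nil {
    		pw, _ := u.User.Password()
    		return u.User.Username(), pw, nil
    	}
    	return os.Getenv("RESTIC_REST_USERNAME"), os.Getenv("RESTIC_REST_PASSWORD"), nil
    }

    func main() {
    	user, pass, err := restCredentials("https://backup.example.com:8000/repo")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("user:", user, "password set:", pass != "")
    }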
7  changelog/0.16.1_2023-10-24/pull-4511  Normal file
@@ -0,0 +1,7 @@
Enhancement: Include inode numbers in JSON output for `find` and `ls` commands

Restic used to omit the inode numbers in the JSON messages emitted for nodes by
the `ls` command as well as for matches by the `find` command. It now includes
those values whenever they are available.

https://github.com/restic/restic/pull/4511
12  changelog/0.16.1_2023-10-24/pull-4519  Normal file
@@ -0,0 +1,12 @@
Enhancement: Add config option to set SFTP command arguments

When using the `sftp` backend, scenarios that required a custom identity file
for the SSH connection previously meant specifying the full command:
`-o sftp.command='ssh user@host:port -i /ssh/my_private_key -s sftp'`

Now, the `-o sftp.args=...` option can be passed to restic to specify
custom arguments for the SSH command executed by the SFTP backend.
This simplifies the above example to `-o sftp.args='-i /ssh/my_private_key'`.

https://github.com/restic/restic/pull/4519
https://github.com/restic/restic/issues/4241
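Conceptually, the backend builds one ssh command line from either the full `sftp.command` override or from the host plus the extra `sftp.args`. The sketch below illustrates that composition; the function and field names are made up for the example and do not mirror restic's internal configuration structs.

    package main

    import (
    	"fmt"
    	"strings"
    )

    // sftpConfig is a hypothetical view of the options relevant to building the command.
    type sftpConfig struct {
    	Host    string // e.g. "user@host"
    	Command string // full override, as set via -o sftp.command=...
    	Args    string // extra ssh arguments, as set via -o sftp.args=...
    }

    // sshCommand assembles the command used to start the SFTP subsystem.
    // Note: strings.Fields does not handle quoted arguments; this is only a sketch.
    func sshCommand(c sftpConfig) []string {
    	if c.Command != "" {
    		return strings.Fields(c.Command) // the override wins and is used verbatim
    	}
    	cmd := []string{"ssh", c.Host}
    	if c.Args != "" {
    		cmd = append(cmd, strings.Fields(c.Args)...)
    	}
    	return append(cmd, "-s", "sftp")
    }

    func main() {
    	cfg := sftpConfig{Host: "user@host", Args: "-i /ssh/my_private_key"}
    	fmt.Println(strings.Join(sshCommand(cfg), " "))
    	// prints: ssh user@host -i /ssh/my_private_key -s sftp
    }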
8  changelog/0.16.1_2023-10-24/pull-4532  Normal file
@@ -0,0 +1,8 @@
Change: Update dependencies and require Go 1.19 or newer

We have updated all dependencies. Since some libraries require newer Go
standard library features, support for Go 1.18 has been dropped, which means
that restic now requires at least Go 1.19 to build.

https://github.com/restic/restic/pull/4532
https://github.com/restic/restic/pull/4533
9  changelog/0.16.2_2023-10-29/issue-4540  Normal file
@@ -0,0 +1,9 @@
Bugfix: Restore ARMv5 support for ARM binaries

The official release binaries for restic 0.16.1 were accidentally built to
require ARMv7. The build process has been updated to restore support for ARMv5.

Please note that restic 0.17.0 will drop support for ARMv5 and require at least
ARMv6.

https://github.com/restic/restic/issues/4540
8  changelog/0.16.2_2023-10-29/pull-4545  Normal file
@@ -0,0 +1,8 @@
Bugfix: Repair documentation build on Read the Docs

For restic 0.16.1, no documentation was available at
https://restic.readthedocs.io/ .

The documentation build process has been updated and works again.

https://github.com/restic/restic/pull/4545
14  changelog/0.16.3_2024-01-14/issue-4560  Normal file
@@ -0,0 +1,14 @@
Bugfix: Improve errors for irregular files on Windows

Since Go 1.21, most filesystem reparse points on Windows are considered to be
irregular files. This caused restic to show an `error: invalid node type ""`
error message for those files.

This error message has now been improved and includes the relevant file path:
`error: nodeFromFileInfo path/to/file: unsupported file type "irregular"`.
As irregular files are not required to behave like regular files, it is not
possible to provide a generic way to back up those files.

https://github.com/restic/restic/issues/4560
https://github.com/restic/restic/pull/4620
https://forum.restic.net/t/windows-backup-error-invalid-node-type/6875
11  changelog/0.16.3_2024-01-14/issue-4574  Normal file
@@ -0,0 +1,11 @@
Bugfix: Support backup of deduplicated files on Windows again

With the official release builds of restic 0.16.1 and 0.16.2, it was not
possible to back up files that were deduplicated by the corresponding
Windows Server feature. This also applied to restic versions built using
Go 1.21.0-1.21.4.

The Go version used to build restic has now been updated to fix this.

https://github.com/restic/restic/issues/4574
https://github.com/restic/restic/pull/4621
11  changelog/0.16.3_2024-01-14/issue-4612  Normal file
@@ -0,0 +1,11 @@
Bugfix: Improve error handling for `rclone` backend

Since restic 0.16.0, if rclone encountered an error while listing files,
this could in rare circumstances cause restic to assume that there are no
files. Although unlikely, this situation could result in data loss if it
were to happen right when the `prune` command is listing existing snapshots.

Error handling has now been improved to detect and work around this case.

https://github.com/restic/restic/issues/4612
https://github.com/restic/restic/pull/4618
11  changelog/0.16.3_2024-01-14/pull-4624  Normal file
@@ -0,0 +1,11 @@
Bugfix: Correct `restore` progress information if an error occurs

If an error occurred while restoring a snapshot, this could cause the `restore`
progress bar to show incorrect information. In addition, if a data file could
not be loaded completely, then errors would also be reported for some already
restored files.

Error reporting of the `restore` command has now been made more accurate.

https://github.com/restic/restic/pull/4624
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943
11  changelog/0.16.3_2024-01-14/pull-4626  Normal file
@@ -0,0 +1,11 @@
Bugfix: Improve reliability of restoring large files

In some cases restic failed to restore large files that frequently contain the
same file chunk. In combination with certain backends, this could result in
network connection timeouts that caused incomplete restores.

Restic now includes special handling for such file chunks to ensure reliable
restores.

https://github.com/restic/restic/pull/4626
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943
18  changelog/0.16.4_2024-02-04/issue-4529  Normal file
@@ -0,0 +1,18 @@
Enhancement: Add extra verification of data integrity before upload

Hardware issues, or a bug in restic or its dependencies, could previously cause
corruption in the files restic created and stored in the repository. Detecting
such corruption previously required explicitly running the `check --read-data`
or `check --read-data-subset` commands.

To further ensure data integrity, even in the case of hardware issues or
software bugs, restic now performs additional verification of the files about
to be uploaded to the repository.

These extra checks will increase CPU usage during backups. If absolutely
necessary, they can be disabled using the `--no-extra-verify` global option.
Please note that disabling them should be combined with more active checking
using the previously mentioned `check` commands.

https://github.com/restic/restic/issues/4529
https://github.com/restic/restic/pull/4681
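The principle behind such a pre-upload check is a round trip: after a blob has been compressed and encrypted, decode it again and confirm the plaintext still hashes to the ID it will be stored under before writing it to the backend. The sketch below uses placeholder `encode` and `decode` functions to show only that shape; it is not restic's verification code.

    package main

    import (
    	"bytes"
    	"crypto/sha256"
    	"fmt"
    )

    // encode and decode stand in for the real compression + encryption round trip.
    func encode(plaintext []byte) []byte { return append([]byte(nil), plaintext...) }

    func decode(ciphertext []byte) ([]byte, error) { return ciphertext, nil }

    // verifyBeforeUpload re-decodes the encoded blob and checks that the content
    // still matches the ID (the SHA-256 of the plaintext) it will be stored under.
    func verifyBeforeUpload(id [32]byte, encoded []byte) error {
    	plaintext, err := decode(encoded)
    	if err != nil {
    		return fmt.Errorf("decoding failed: %w", err)
    	}
    	if sum := sha256.Sum256(plaintext); !bytes.Equal(sum[:], id[:]) {
    		return fmt.Errorf("blob %x is corrupted before upload", id[:8])
    	}
    	return nil
    }

    func main() {
    	data := []byte("example blob contents")
    	id := sha256.Sum256(data)
    	if err := verifyBeforeUpload(id, encode(data)); err != nil {
    		panic(err)
    	}
    	fmt.Println("blob verified, safe to upload")
    }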
19  changelog/0.16.4_2024-02-04/issue-4677  Normal file
@@ -0,0 +1,19 @@
Bugfix: Downgrade zstd library to fix rare data corruption at max. compression

In restic 0.16.3, backups where the compression level was set to `max` (using
`--compression max`) could in rare and very specific circumstances result in
data corruption due to a bug in the library used for compressing data. Restic
0.16.1 and 0.16.2 were not affected.

Restic now uses the previous version of the library used to compress data, the
same version used by restic 0.16.2. Please note that the `auto` compression
level (which restic uses by default) was never affected, and even if you used
`max` compression, chances of being affected by this issue are small.

To check a repository for any corruption, run `restic check --read-data`. This
will download and verify the whole repository and can be used at any time to
completely verify the integrity of a repository. If the `check` command detects
anomalies, follow the suggested steps.

https://github.com/restic/restic/issues/4677
https://github.com/restic/restic/pull/4679
6  changelog/0.16.5_2024-07-01/issue-4873  Normal file
@@ -0,0 +1,6 @@
Enhancement: Update dependencies

A few potentially vulnerable dependencies were updated.

https://github.com/restic/restic/issues/4873
https://github.com/restic/restic/pull/4878
5  changelog/0.16.5_2024-07-01/pull-4799  Normal file
@@ -0,0 +1,5 @@
Enhancement: Add option to force use of Azure CLI credential

A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the
use of the Azure CLI credential, ignoring other credentials such as managed
identity.

https://github.com/restic/restic/pull/4799
@@ -1,18 +1,21 @@
{{- range $changes := . }}{{ with $changes -}}
# Table of Contents
Changelog for restic {{ .Version }} ({{ .Date }})
=======================================
{{ range . -}}
* [Changelog for {{ .Version }}](#changelog-for-restic-{{ .Version | replace "." ""}}-{{ .Date | lower -}})
{{ end -}}
{{- range $changes := . }}{{ with $changes }}
# Changelog for restic {{ .Version }} ({{ .Date }})
The following sections list the changes in restic {{ .Version }} relevant to
restic users. The changes are ordered by importance.
Summary
## Summary
-------
{{ range $entry := .Entries }}{{ with $entry }}
* {{ .TypeShort }} #{{ .PrimaryID }}: {{ .Title }}
{{- end }}{{ end }}
Details
## Details
-------
{{ range $entry := .Entries }}{{ with $entry }}
* {{ .Type }} #{{ .PrimaryID }}: {{ .Title }}
{{ range $par := .Paragraphs }}
@@ -27,6 +30,5 @@ Details
{{ range $url := .OtherURLs }}
{{ $url -}}
{{ end }}
{{ end }}{{ end }}
{{ end }}{{ end -}}
{{ end }}{{ end -}}
@@ -1,16 +1,17 @@
# The first line must start with Bugfix:, Enhancement: or Change:,
# including the colon. Use present tense. Remove lines starting with '#'
# including the colon. Use present tense and the imperative mood. Remove
# from this template.
# lines starting with '#' from this template.
Enhancement: Allow custom bar in the foo command
# Describe the problem in the past tense, the new behavior in the present
# tense. Mention the affected commands, backends, operating systems, etc.
# Focus on user-facing behavior, not the implementation.
# Use "Restic now ..." instead of "We have changed ...".
Restic foo always used the system-wide bar when deciding how to frob an
item in the baz backend. It now permits selecting the bar with --bar or
item in the `baz` backend. It now permits selecting the bar with `--bar`
the environment variable RESTIC_BAR. The system-wide bar is still the
or the environment variable `RESTIC_BAR`. The system-wide bar is still
default.
the default.
# The last section is a list of issue, PR and forum URLs.
# The first issue ID determines the filename for the changelog entry:
@@ -62,6 +62,12 @@ func CleanupHandler(c <-chan os.Signal) {
debug.Log("signal %v received, cleaning up", s)
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
_, _ = os.Stderr.WriteString(debug.DumpStacktrace())
_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
}
code := 0
if s == syscall.SIGINT {
@@ -78,5 +84,6 @@ func CleanupHandler(c <-chan os.Signal) {
// given exit code.
func Exit(code int) {
code = RunCleanupHandlers(code)
debug.Log("exiting with status code %d", code)
os.Exit(code)
}
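The new `RESTIC_DEBUG_STACKTRACE_SIGINT` branch above prints all goroutine stacks when an interrupt arrives. A standalone equivalent of dumping every goroutine, assuming nothing about restic's internal `debug` package, looks roughly like this:

    package main

    import (
    	"os"
    	"os/signal"
    	"runtime"
    )

    // dumpAllStacks writes the stack traces of all goroutines to stderr.
    func dumpAllStacks() {
    	buf := make([]byte, 1<<20) // 1 MiB is usually enough for all goroutines
    	n := runtime.Stack(buf, true)
    	_, _ = os.Stderr.Write(buf[:n])
    }

    func main() {
    	ch := make(chan os.Signal, 1)
    	signal.Notify(ch, os.Interrupt)
    	<-ch // wait for Ctrl-C
    	if os.Getenv("RESTIC_DEBUG_STACKTRACE_SIGINT") != "" {
    		dumpAllStacks()
    	}
    }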
@@ -89,6 +89,7 @@ type BackupOptions struct {
excludePatternOptions
Parent string
GroupBy restic.SnapshotGroupByOptions
Force bool
ExcludeOtherFS bool
ExcludeIfPresent []string
@@ -120,7 +121,9 @@ func init() {
cmdRoot.AddCommand(cmdBackup)
f := cmdBackup.Flags()
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)")
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)")
backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
@@ -305,7 +308,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
// collectRejectByNameFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path only
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
@@ -340,7 +343,7 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path and file info
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS && !opts.Stdin {
f, err := rejectByDevice(targets)
@@ -439,13 +442,18 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup
if snName == "" {
snName = "latest"
}
f := restic.SnapshotFilter{
f := restic.SnapshotFilter{TimestampLimit: timeStampLimit}
Hosts: []string{opts.Host},
if opts.GroupBy.Host {
Paths: targets,
f.Hosts = []string{opts.Host}
TimestampLimit: timeStampLimit,
}
if opts.GroupBy.Path {
f.Paths = targets
}
if opts.GroupBy.Tag {
f.Tags = []restic.TagList{opts.Tags.Flatten()}
}
sn, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
sn, _, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
// Snapshot not found is ok if no explicit parent was set
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
err = nil
@@ -498,20 +506,23 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
if !gopts.JSON {
progressPrinter.V("lock repository")
}
lock, ctx, err := lockRepo(ctx, repo)
if !opts.DryRun {
defer unlockRepo(lock)
var lock *restic.Lock
if err != nil {
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
return err
defer unlockRepo(lock)
if err != nil {
return err
}
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup based on path and file info
rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
rejectFuncs, err := collectRejectFuncs(opts, targets)
if err != nil {
return err
}
@@ -535,7 +546,10 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
if !gopts.JSON {
progressPrinter.V("load index files")
}
err = repo.LoadIndex(ctx)
bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
@@ -637,6 +651,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
Time: timeStamp,
Hostname: opts.Host,
ParentSnapshot: parentSnapshot,
ProgramVersion: "restic " + version,
}
if !gopts.JSON {
569
cmd/restic/cmd_backup_integration_test.go
Normal file
569
cmd/restic/cmd_backup_integration_test.go
Normal file
@@ -0,0 +1,569 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/restic/restic/internal/fs"
|
||||||
|
"github.com/restic/restic/internal/restic"
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
"github.com/restic/restic/internal/ui/termstatus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
|
||||||
|
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||||
|
t.Logf("backing up %v in %v", target, dir)
|
||||||
|
if dir != "" {
|
||||||
|
cleanup := rtest.Chdir(t, dir)
|
||||||
|
defer cleanup()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||||
|
return runBackup(ctx, opts, gopts, term, target)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
|
||||||
|
err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
|
||||||
|
rtest.Assert(t, err == nil, "Error while backing up")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackup(t *testing.T) {
|
||||||
|
testBackup(t, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupWithFilesystemSnapshots(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
|
||||||
|
testBackup(t, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testBackup(t *testing.T, useFsSnapshot bool) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat1 := dirStats(env.repo)
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
stat2 := dirStats(env.repo)
|
||||||
|
if stat2.size > stat1.size+stat1.size/10 {
|
||||||
|
t.Error("repository size has grown by more than 10 percent")
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
// third backup, explicit incremental
|
||||||
|
opts.Parent = snapshotIDs[0].String()
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 3)
|
||||||
|
|
||||||
|
stat3 := dirStats(env.repo)
|
||||||
|
if stat3.size > stat1.size+stat1.size/10 {
|
||||||
|
t.Error("repository size has grown by more than 10 percent")
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
|
||||||
|
|
||||||
|
// restore all backups and compare
|
||||||
|
for i, snapshotID := range snapshotIDs {
|
||||||
|
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||||
|
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||||
|
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||||
|
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupWithRelativePath(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
|
||||||
|
// that the correct parent snapshot was used
|
||||||
|
latestSn, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
rtest.Assert(t, latestSn != nil, "missing latest snapshot")
|
||||||
|
rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupParentSelection(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
|
||||||
|
firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
|
||||||
|
|
||||||
|
// second backup, sibling path
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
// third backup, incremental for the first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
|
||||||
|
|
||||||
|
// test that the correct parent snapshot was used
|
||||||
|
latestSn, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
rtest.Assert(t, latestSn != nil, "missing latest snapshot")
|
||||||
|
rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDryRunBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
dryOpts := BackupOptions{DryRun: true}
|
||||||
|
|
||||||
|
// dry run before first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 0)
|
||||||
|
packIDs := testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Assert(t, len(packIDs) == 0,
|
||||||
|
"expected no data, got %v", snapshotIDs)
|
||||||
|
indexIDs := testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Assert(t, len(indexIDs) == 0,
|
||||||
|
"expected no index, got %v", snapshotIDs)
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 1)
|
||||||
|
packIDs = testRunList(t, "packs", env.gopts)
|
||||||
|
indexIDs = testRunList(t, "index", env.gopts)
|
||||||
|
|
||||||
|
// dry run between backups
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDsAfter := testListSnapshots(t, env.gopts, 1)
|
||||||
|
rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
|
||||||
|
dataIDsAfter := testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Equals(t, packIDs, dataIDsAfter)
|
||||||
|
indexIDsAfter := testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Equals(t, indexIDs, indexIDsAfter)
|
||||||
|
|
||||||
|
// second backup, implicit incremental
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshotIDs = testListSnapshots(t, env.gopts, 2)
|
||||||
|
packIDs = testRunList(t, "packs", env.gopts)
|
||||||
|
indexIDs = testRunList(t, "index", env.gopts)
|
||||||
|
|
||||||
|
// another dry run
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
|
||||||
|
snapshotIDsAfter = testListSnapshots(t, env.gopts, 2)
|
||||||
|
rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
|
||||||
|
dataIDsAfter = testRunList(t, "packs", env.gopts)
|
||||||
|
rtest.Equals(t, packIDs, dataIDsAfter)
|
||||||
|
indexIDsAfter = testRunList(t, "index", env.gopts)
|
||||||
|
rtest.Equals(t, indexIDs, indexIDsAfter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupNonExistingFile(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
|
||||||
|
_ = withRestoreGlobalOptions(func() error {
|
||||||
|
globalOptions.stderr = io.Discard
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "0", "0", "9")
|
||||||
|
dirs := []string{
|
||||||
|
filepath.Join(p, "0"),
|
||||||
|
filepath.Join(p, "1"),
|
||||||
|
filepath.Join(p, "nonexisting"),
|
||||||
|
filepath.Join(p, "5"),
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", dirs, opts, env.gopts)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupSelfHealing(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
p := filepath.Join(env.testdata, "test/test")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 5))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// remove all data packs
|
||||||
|
removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
|
||||||
|
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is also missing the data blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil,
|
||||||
|
"backup should have reported an error")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupTreeLoadError(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
p := filepath.Join(env.testdata, "test/test")
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||||
|
rtest.OK(t, appendRandomData(p, 5))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
// Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
|
||||||
|
testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
|
||||||
|
|
||||||
|
r, err := OpenRepository(context.TODO(), env.gopts)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
rtest.OK(t, r.LoadIndex(context.TODO(), nil))
|
||||||
|
treePacks := restic.NewIDSet()
|
||||||
|
r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
|
||||||
|
if pb.Type == restic.TreeBlob {
|
||||||
|
treePacks.Insert(pb.PackID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// delete the subdirectory pack first
|
||||||
|
for id := range treePacks {
|
||||||
|
rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
|
||||||
|
}
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is missing the tree blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// remove all tree packs
|
||||||
|
removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
|
||||||
|
testRunRebuildIndex(t, env.gopts)
|
||||||
|
// now the repo is also missing the data blob in the index; check should report this
|
||||||
|
testRunCheckMustFail(t, env.gopts)
|
||||||
|
// second backup should report an error but "heal" this situation
|
||||||
|
err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "backup should have reported an error")
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
var backupExcludeFilenames = []string{
|
||||||
|
"testfile1",
|
||||||
|
"foo.tar.gz",
|
||||||
|
"private/secret/passwords.txt",
|
||||||
|
"work/source/test.c",
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupExclude(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
datadir := filepath.Join(env.base, "testdata")
|
||||||
|
|
||||||
|
for _, filename := range backupExcludeFilenames {
|
||||||
|
fp := filepath.Join(datadir, filename)
|
||||||
|
rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
|
||||||
|
|
||||||
|
f, err := os.Create(fp)
|
||||||
|
rtest.OK(t, err)
|
||||||
|
|
||||||
|
fmt.Fprint(f, filename)
|
||||||
|
rtest.OK(t, f.Close())
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshots := make(map[string]struct{})
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files := testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")
|
||||||
|
|
||||||
|
opts.Excludes = []string{"*.tar.gz"}
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files = testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
|
||||||
|
|
||||||
|
opts.Excludes = []string{"*.tar.gz", "private/secret"}
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
|
||||||
|
files = testRunLs(t, env.gopts, snapshotID)
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
|
||||||
|
rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
|
||||||
|
"expected file %q not in first snapshot, but it's included", "passwords.txt")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupErrors(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
|
||||||
|
// Assume failure
|
||||||
|
inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
|
||||||
|
rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
|
||||||
|
defer func() {
|
||||||
|
rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
|
||||||
|
}()
|
||||||
|
opts := BackupOptions{}
|
||||||
|
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
|
||||||
|
rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
|
||||||
|
rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
incrementalFirstWrite = 10 * 1042 * 1024
|
||||||
|
incrementalSecondWrite = 1 * 1042 * 1024
|
||||||
|
incrementalThirdWrite = 1 * 1042 * 1024
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIncrementalBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
datadir := filepath.Join(env.base, "testdata")
|
||||||
|
testfile := filepath.Join(datadir, "testfile")
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat1 := dirStats(env.repo)
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat2 := dirStats(env.repo)
|
||||||
|
if stat2.size-stat1.size > incrementalFirstWrite {
|
||||||
|
t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
|
||||||
|
|
||||||
|
rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
stat3 := dirStats(env.repo)
|
||||||
|
if stat3.size-stat2.size > incrementalFirstWrite {
|
||||||
|
t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
|
||||||
|
}
|
||||||
|
t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupTags(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 0,
|
||||||
|
"expected no tags, got %v", newest.Tags)
|
||||||
|
parent := newest
|
||||||
|
|
||||||
|
opts.Tags = restic.TagLists{[]string{"NL"}}
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
newest, _ = testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
|
||||||
|
"expected one NL tag, got %v", newest.Tags)
|
||||||
|
// Tagged backup should have untagged backup as parent.
|
||||||
|
rtest.Assert(t, parent.ID.Equal(*newest.Parent),
|
||||||
|
"expected parent to be %v, got %v", parent.ID, newest.Parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupProgramVersion(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||||
|
newest, _ := testRunSnapshots(t, env.gopts)
|
||||||
|
|
||||||
|
if newest == nil {
|
||||||
|
t.Fatal("expected a backup, got nil")
|
||||||
|
}
|
||||||
|
resticVersion := "restic " + version
|
||||||
|
rtest.Assert(t, newest.ProgramVersion == resticVersion,
|
||||||
|
"expected %v, got %v", resticVersion, newest.ProgramVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestQuietBackup(t *testing.T) {
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSetupBackupData(t, env)
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
env.gopts.Quiet = false
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
env.gopts.Quiet = true
|
||||||
|
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
|
||||||
|
testListSnapshots(t, env.gopts, 2)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHardLink(t *testing.T) {
|
||||||
|
// this test assumes a test set with a single directory containing hard linked files
|
||||||
|
env, cleanup := withTestEnvironment(t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
datafile := filepath.Join("testdata", "test.hl.tar.gz")
|
||||||
|
fd, err := os.Open(datafile)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
t.Skipf("unable to find data file %q, skipping", datafile)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rtest.OK(t, err)
|
||||||
|
rtest.OK(t, fd.Close())
|
||||||
|
|
||||||
|
testRunInit(t, env.gopts)
|
||||||
|
|
||||||
|
rtest.SetupTarTestFixture(t, env.testdata, datafile)
|
||||||
|
|
||||||
|
linkTests := createFileSetPerHardlink(env.testdata)
|
||||||
|
|
||||||
|
opts := BackupOptions{}
|
||||||
|
|
||||||
|
// first backup
|
||||||
|
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
|
||||||
|
snapshotIDs := testListSnapshots(t, env.gopts, 1)
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
|
||||||
|
// restore all backups and compare
|
||||||
|
for i, snapshotID := range snapshotIDs {
|
||||||
|
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
|
||||||
|
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
|
||||||
|
testRunRestore(t, env.gopts, restoredir, snapshotID)
|
||||||
|
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
|
||||||
|
|
||||||
|
linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
|
||||||
|
rtest.Assert(t, linksEqual(linkTests, linkResults),
|
||||||
|
"links are not equal")
|
||||||
|
}
|
||||||
|
|
||||||
|
testRunCheck(t, env.gopts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func linksEqual(source, dest map[uint64][]string) bool {
|
||||||
|
for _, vs := range source {
|
||||||
|
found := false
|
||||||
|
for kd, vd := range dest {
|
||||||
|
if linkEqual(vs, vd) {
|
||||||
|
delete(dest, kd)
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(dest) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func linkEqual(source, dest []string) bool {
|
||||||
|
// equal if sliced are equal without considering order
|
||||||
|
if source == nil && dest == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if source == nil || dest == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(source) != len(dest) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range source {
|
||||||
|
found := false
|
||||||
|
for j := range dest {
|
||||||
|
if source[i] == dest[j] {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
@@ -155,7 +155,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error {
})
}
_ = tab.Write(gopts.stdout)
_ = tab.Write(globalOptions.stdout)
Printf("%d cache dirs in %s\n", len(dirs), cachedir)
return nil
@@ -3,6 +3,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
@@ -13,7 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var cmdCat = &cobra.Command{
|
var cmdCat = &cobra.Command{
|
||||||
Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
|
Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
|
||||||
Short: "Print internal objects to stdout",
|
Short: "Print internal objects to stdout",
|
||||||
Long: `
|
Long: `
|
||||||
The "cat" command is used to print internal objects to stdout.
|
The "cat" command is used to print internal objects to stdout.
|
||||||
@@ -33,9 +34,34 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdCat)
|
cmdRoot.AddCommand(cmdCat)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateCatArgs(args []string) error {
|
||||||
|
var allowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"}
|
||||||
|
|
||||||
|
if len(args) < 1 {
|
||||||
|
return errors.Fatal("type not specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
validType := false
|
||||||
|
for _, v := range allowedCmds {
|
||||||
|
if v == args[0] {
|
||||||
|
validType = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !validType {
|
||||||
|
return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(allowedCmds, "|"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if args[0] != "masterkey" && args[0] != "config" && len(args) != 2 {
|
||||||
|
return errors.Fatal("ID not specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
||||||
if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
|
if err := validateCatArgs(args); err != nil {
|
||||||
return errors.Fatal("type or ID not specified")
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
repo, err := OpenRepository(ctx, gopts)
|
repo, err := OpenRepository(ctx, gopts)
|
||||||
@@ -45,7 +71,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
if !gopts.NoLock {
|
if !gopts.NoLock {
|
||||||
var lock *restic.Lock
|
var lock *restic.Lock
|
||||||
lock, ctx, err = lockRepo(ctx, repo)
|
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||||
defer unlockRepo(lock)
|
defer unlockRepo(lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -55,7 +81,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
tpe := args[0]
|
tpe := args[0]
|
||||||
|
|
||||||
var id restic.ID
|
var id restic.ID
|
||||||
if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" {
|
if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" && tpe != "tree" {
|
||||||
id, err = restic.ParseID(args[1])
|
id, err = restic.ParseID(args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("unable to parse ID: %v\n", err)
|
return errors.Fatalf("unable to parse ID: %v\n", err)
|
||||||
@@ -72,7 +98,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
Println(string(buf))
|
Println(string(buf))
|
||||||
return nil
|
return nil
|
||||||
case "index":
|
case "index":
|
||||||
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
|
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -80,7 +106,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
Println(string(buf))
|
Println(string(buf))
|
||||||
return nil
|
return nil
|
||||||
case "snapshot":
|
case "snapshot":
|
||||||
sn, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
sn, _, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Fatalf("could not find snapshot: %v\n", err)
|
return errors.Fatalf("could not find snapshot: %v\n", err)
|
||||||
}
|
}
|
||||||
@@ -143,7 +169,8 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
return err
|
return err
|
||||||
|
|
||||||
case "blob":
|
case "blob":
|
||||||
err = repo.LoadIndex(ctx)
|
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
||||||
|
err = repo.LoadIndex(ctx, bar)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -165,6 +192,30 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
|
|||||||
|
|
||||||
return errors.Fatal("blob not found")
|
return errors.Fatal("blob not found")
|
||||||
|
|
||||||
|
case "tree":
|
||||||
|
sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
|
||||||
|
if err != nil {
|
||||||
|
return errors.Fatalf("could not find snapshot: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
|
||||||
|
err = repo.LoadIndex(ctx, bar)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := repo.LoadBlob(ctx, restic.TreeBlob, *sn.Tree, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = globalOptions.stdout.Write(buf)
|
||||||
|
return err
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return errors.Fatal("invalid type")
|
return errors.Fatal("invalid type")
|
||||||
}
|
}
|
||||||
|
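Note: the new "tree" case above, and the diff/dump changes later in this comparison, all rely on the same two-step resolution: restic.FindSnapshot now also returns the subfolder part of a "snapshotID:subfolder" argument, and restic.FindTreeDirectory walks the snapshot tree down to that subfolder. A minimal sketch of that flow, assuming an already opened repository whose index has been loaded; the helper name and exact parameter types are illustrative, the two restic functions are taken from the hunks themselves:

package main

import (
	"context"

	"github.com/restic/restic/internal/restic"
)

// resolveSubtree resolves a "snapshotID:subfolder" reference to the tree ID of
// that subfolder, mirroring the pattern used by the cat/diff/dump changes.
// Sketch only: the repository must already be opened and its index loaded.
func resolveSubtree(ctx context.Context, repo restic.Repository, ref string) (*restic.ID, error) {
	sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, ref)
	if err != nil {
		return nil, err
	}
	// Walk from the snapshot root tree down to the requested subfolder.
	return restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
}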
cmd/restic/cmd_cat_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+package main
+
+import (
+	"strings"
+	"testing"
+
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestCatArgsValidation(t *testing.T) {
+	for _, test := range []struct {
+		args []string
+		err  string
+	}{
+		{[]string{}, "Fatal: type not specified"},
+		{[]string{"masterkey"}, ""},
+		{[]string{"invalid"}, `Fatal: invalid type "invalid"`},
+		{[]string{"snapshot"}, "Fatal: ID not specified"},
+		{[]string{"snapshot", "12345678"}, ""},
+	} {
+		t.Run("", func(t *testing.T) {
+			err := validateCatArgs(test.args)
+			if test.err == "" {
+				rtest.Assert(t, err == nil, "unexpected error %q", err)
+			} else {
+				rtest.Assert(t, strings.Contains(err.Error(), test.err), "unexpected error expected %q to contain %q", err, test.err)
+			}
+		})
+	}
+}
@@ -16,6 +16,7 @@ import (
 	"github.com/restic/restic/internal/errors"
 	"github.com/restic/restic/internal/fs"
 	"github.com/restic/restic/internal/restic"
+	"github.com/restic/restic/internal/ui"
 )
 
 var cmdCheck = &cobra.Command{
@@ -65,7 +66,7 @@ func init() {
 		// MarkDeprecated only returns an error when the flag is not found
 		panic(err)
 	}
-	f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
+	f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use existing cache, only read uncached data from repository")
 }
 
 func checkFlags(opts CheckOptions) error {
@@ -97,7 +98,7 @@ func checkFlags(opts CheckOptions) error {
 		}
 
 		} else {
-			fileSize, err := parseSizeStr(opts.ReadDataSubset)
+			fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
 			if err != nil {
 				return argumentError
 			}
@@ -211,7 +212,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 	if !gopts.NoLock {
 		Verbosef("create exclusive lock for repository\n")
 		var lock *restic.Lock
-		lock, ctx, err = lockRepoExclusive(ctx, repo)
+		lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -225,7 +226,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 	}
 
 	Verbosef("load indexes\n")
-	hints, errs := chkr.LoadIndex(ctx)
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	hints, errs := chkr.LoadIndex(ctx, bar)
 
 	errorsFound := false
 	suggestIndexRebuild := false
@@ -245,7 +247,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 	}
 
 	if suggestIndexRebuild {
-		Printf("Duplicate packs/old indexes are non-critical, you can run `restic rebuild-index' to correct this.\n")
+		Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
 	}
 	if mixedFound {
 		Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
@@ -328,11 +330,28 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 
 		go chkr.ReadPacks(ctx, packs, p, errChan)
 
+		var salvagePacks restic.IDs
+
 		for err := range errChan {
 			errorsFound = true
 			Warnf("%v\n", err)
+			if err, ok := err.(*checker.ErrPackData); ok {
+				if strings.Contains(err.Error(), "wrong data returned, hash is") {
+					salvagePacks = append(salvagePacks, err.PackID)
+				}
+			}
 		}
 		p.Done()
+
+		if len(salvagePacks) > 0 {
+			Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n")
+			var strIds []string
+			for _, id := range salvagePacks {
+				strIds = append(strIds, id.String())
+			}
+			Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIds, " "))
+			Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n")
+		}
 	}
 
 	switch {
@@ -363,7 +382,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 		if repoSize == 0 {
 			return errors.Fatal("Cannot read from a repository having size 0")
 		}
-		subsetSize, _ := parseSizeStr(opts.ReadDataSubset)
+		subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
 		if subsetSize > repoSize {
 			subsetSize = repoSize
 		}
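Note: the salvage logic added above keys off the concrete error type returned by the checker. A condensed sketch of that pattern with the output trimmed; the checker.ErrPackData type and its PackID field are taken from the hunk above, the standalone helper and its name are illustrative only:

package main

import (
	"strings"

	"github.com/restic/restic/internal/checker"
	"github.com/restic/restic/internal/restic"
)

// collectDamagedPacks gathers the IDs of packs whose blobs failed the hash
// check, so the user can be pointed at the repair commands printed above.
// Sketch only, mirroring the loop added in runCheck.
func collectDamagedPacks(errChan <-chan error) restic.IDs {
	var salvagePacks restic.IDs
	for err := range errChan {
		if err, ok := err.(*checker.ErrPackData); ok {
			if strings.Contains(err.Error(), "wrong data returned, hash is") {
				salvagePacks = append(salvagePacks, err.PackID)
			}
		}
	}
	return salvagePacks
}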
|
34
cmd/restic/cmd_check_integration_test.go
Normal file
34
cmd/restic/cmd_check_integration_test.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
rtest "github.com/restic/restic/internal/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testRunCheck(t testing.TB, gopts GlobalOptions) {
|
||||||
|
t.Helper()
|
||||||
|
output, err := testRunCheckOutput(gopts, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(output)
|
||||||
|
t.Fatalf("unexpected error: %+v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) {
|
||||||
|
t.Helper()
|
||||||
|
_, err := testRunCheckOutput(gopts, false)
|
||||||
|
rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) {
|
||||||
|
buf, err := withCaptureStdout(func() error {
|
||||||
|
opts := CheckOptions{
|
||||||
|
ReadData: true,
|
||||||
|
CheckUnused: checkUnused,
|
||||||
|
}
|
||||||
|
return runCheck(context.TODO(), opts, gopts, nil)
|
||||||
|
})
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
@@ -6,6 +6,7 @@ import (
 
 	"github.com/restic/restic/internal/backend"
 	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/errors"
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	"golang.org/x/sync/errgroup"
@@ -74,14 +75,14 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
 
 	if !gopts.NoLock {
 		var srcLock *restic.Lock
-		srcLock, ctx, err = lockRepo(ctx, srcRepo)
+		srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(srcLock)
 		if err != nil {
 			return err
 		}
 	}
 
-	dstLock, ctx, err := lockRepo(ctx, dstRepo)
+	dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
 	defer unlockRepo(dstLock)
 	if err != nil {
 		return err
@@ -98,12 +99,13 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
 	}
 
 	debug.Log("Loading source index")
-	if err := srcRepo.LoadIndex(ctx); err != nil {
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	if err := srcRepo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
+	bar = newIndexProgress(gopts.Quiet, gopts.JSON)
 	debug.Log("Loading destination index")
-	if err := dstRepo.LoadIndex(ctx); err != nil {
+	if err := dstRepo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
 
@@ -236,5 +238,8 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
 	bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
 	_, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar)
 	bar.Done()
-	return err
+	if err != nil {
+		return errors.Fatal(err.Error())
+	}
+	return nil
 }
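Note: the newIndexProgress / LoadIndex(ctx, bar) change in this hunk recurs in most of the other commands in this comparison; it simply threads a progress bar into index loading. A minimal sketch of the call pattern as used here; the helper name newIndexProgress and GlobalOptions come from the hunks, the wrapper function itself is illustrative and assumes LoadIndex on the repository interface accepts the progress counter:

package main

import (
	"context"

	"github.com/restic/restic/internal/restic"
)

// loadIndexWithProgress wires an index-loading progress bar the same way the
// hunks above do: the bar is silenced for quiet or JSON output.
// Sketch only; newIndexProgress and GlobalOptions are existing helpers of
// this package.
func loadIndexWithProgress(ctx context.Context, repo restic.Repository, gopts GlobalOptions) error {
	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
	return repo.LoadIndex(ctx, bar)
}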
cmd/restic/cmd_copy_integration_test.go (new file, 136 lines)
@@ -0,0 +1,136 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"testing"
+
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
+	gopts := srcGopts
+	gopts.Repo = dstGopts.Repo
+	gopts.password = dstGopts.password
+	copyOpts := CopyOptions{
+		secondaryRepoOptions: secondaryRepoOptions{
+			Repo:     srcGopts.Repo,
+			password: srcGopts.password,
+		},
+	}
+
+	rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
+}
+
+func TestCopy(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+	env2, cleanup2 := withTestEnvironment(t)
+	defer cleanup2()
+
+	testSetupBackupData(t, env)
+	opts := BackupOptions{}
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+	testRunCheck(t, env.gopts)
+
+	testRunInit(t, env2.gopts)
+	testRunCopy(t, env.gopts, env2.gopts)
+
+	snapshotIDs := testListSnapshots(t, env.gopts, 3)
+	copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3)
+
+	// Check that the copies size seems reasonable
+	stat := dirStats(env.repo)
+	stat2 := dirStats(env2.repo)
+	sizeDiff := int64(stat.size) - int64(stat2.size)
+	if sizeDiff < 0 {
+		sizeDiff = -sizeDiff
+	}
+	rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
+		stat.size, stat2.size)
+
+	// Check integrity of the copy
+	testRunCheck(t, env2.gopts)
+
+	// Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
+	origRestores := make(map[string]struct{})
+	for i, snapshotID := range snapshotIDs {
+		restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+		origRestores[restoredir] = struct{}{}
+		testRunRestore(t, env.gopts, restoredir, snapshotID)
+	}
+	for i, snapshotID := range copiedSnapshotIDs {
+		restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
+		testRunRestore(t, env2.gopts, restoredir, snapshotID)
+		foundMatch := false
+		for cmpdir := range origRestores {
+			diff := directoriesContentsDiff(restoredir, cmpdir)
+			if diff == "" {
+				delete(origRestores, cmpdir)
+				foundMatch = true
+			}
+		}
+
+		rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
+	}
+
+	rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots")
+}
+
+func TestCopyIncremental(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+	env2, cleanup2 := withTestEnvironment(t)
+	defer cleanup2()
+
+	testSetupBackupData(t, env)
+	opts := BackupOptions{}
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+	testRunCheck(t, env.gopts)
+
+	testRunInit(t, env2.gopts)
+	testRunCopy(t, env.gopts, env2.gopts)
+
+	testListSnapshots(t, env.gopts, 2)
+	testListSnapshots(t, env2.gopts, 2)
+
+	// Check that the copies size seems reasonable
+	testRunCheck(t, env2.gopts)
+
+	// check that no snapshots are copied, as there are no new ones
+	testRunCopy(t, env.gopts, env2.gopts)
+	testRunCheck(t, env2.gopts)
+	testListSnapshots(t, env2.gopts, 2)
+
+	// check that only new snapshots are copied
+	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+	testRunCopy(t, env.gopts, env2.gopts)
+	testRunCheck(t, env2.gopts)
+	testListSnapshots(t, env.gopts, 3)
+	testListSnapshots(t, env2.gopts, 3)
+
+	// also test the reverse direction
+	testRunCopy(t, env2.gopts, env.gopts)
+	testRunCheck(t, env.gopts)
+	testListSnapshots(t, env.gopts, 3)
+}
+
+func TestCopyUnstableJSON(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+	env2, cleanup2 := withTestEnvironment(t)
+	defer cleanup2()
+
+	// contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
+	datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
+	rtest.SetupTarTestFixture(t, env.base, datafile)
+
+	testRunInit(t, env2.gopts)
+	testRunCopy(t, env.gopts, env2.gopts)
+	testRunCheck(t, env2.gopts)
+	testListSnapshots(t, env2.gopts, 1)
+}
@@ -134,7 +134,7 @@ func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer)
 }
 
 func dumpIndexes(ctx context.Context, repo restic.Repository, wr io.Writer) error {
-	return index.ForAllIndexes(ctx, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
+	return index.ForAllIndexes(ctx, repo.Backend(), repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
 		Printf("index_id: %v\n", id)
 		if err != nil {
 			return err
@@ -156,7 +156,7 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -167,20 +167,20 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
 
 	switch tpe {
 	case "indexes":
-		return dumpIndexes(ctx, repo, gopts.stdout)
+		return dumpIndexes(ctx, repo, globalOptions.stdout)
 	case "snapshots":
-		return debugPrintSnapshots(ctx, repo, gopts.stdout)
+		return debugPrintSnapshots(ctx, repo, globalOptions.stdout)
 	case "packs":
-		return printPacks(ctx, repo, gopts.stdout)
+		return printPacks(ctx, repo, globalOptions.stdout)
 	case "all":
 		Printf("snapshots:\n")
-		err := debugPrintSnapshots(ctx, repo, gopts.stdout)
+		err := debugPrintSnapshots(ctx, repo, globalOptions.stdout)
 		if err != nil {
 			return err
 		}
 
 		Printf("\nindexes:\n")
-		err = dumpIndexes(ctx, repo, gopts.stdout)
+		err = dumpIndexes(ctx, repo, globalOptions.stdout)
 		if err != nil {
 			return err
 		}
@@ -462,14 +462,15 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = repo.LoadIndex(ctx)
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	err = repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return err
 	}
@@ -16,7 +16,7 @@ import (
 )
 
 var cmdDiff = &cobra.Command{
-	Use:   "diff [flags] snapshot-ID snapshot-ID",
+	Use:   "diff [flags] snapshotID snapshotID",
 	Short: "Show differences between two snapshots",
 	Long: `
 The "diff" command shows differences from the first to the second snapshot. The
@@ -29,6 +29,10 @@ directory:
 * M The file's content was modified
 * T The type was changed, e.g. a file was made a symlink
 
+To only compare files in specific subfolders, you can use the
+"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
+snapshot.
+
 EXIT STATUS
 ===========
 
@@ -54,12 +58,12 @@ func init() {
 	f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
 }
 
-func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, error) {
-	sn, err := restic.FindSnapshot(ctx, be, repo, desc)
+func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, string, error) {
+	sn, subfolder, err := restic.FindSnapshot(ctx, be, repo, desc)
 	if err != nil {
-		return nil, errors.Fatal(err.Error())
+		return nil, "", errors.Fatal(err.Error())
 	}
-	return sn, err
+	return sn, subfolder, err
 }
 
 // Comparer collects all things needed to compare two snapshots.
@@ -334,7 +338,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -346,12 +350,12 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 	if err != nil {
 		return err
 	}
-	sn1, err := loadSnapshot(ctx, be, repo, args[0])
+	sn1, subfolder1, err := loadSnapshot(ctx, be, repo, args[0])
 	if err != nil {
 		return err
 	}
 
-	sn2, err := loadSnapshot(ctx, be, repo, args[1])
+	sn2, subfolder2, err := loadSnapshot(ctx, be, repo, args[1])
 	if err != nil {
 		return err
 	}
@@ -359,8 +363,8 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 	if !gopts.JSON {
 		Verbosef("comparing snapshot %v to %v:\n\n", sn1.ID().Str(), sn2.ID().Str())
 	}
-
-	if err = repo.LoadIndex(ctx); err != nil {
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	if err = repo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
 
@@ -372,6 +376,16 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 		return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str())
 	}
 
+	sn1.Tree, err = restic.FindTreeDirectory(ctx, repo, sn1.Tree, subfolder1)
+	if err != nil {
+		return err
+	}
+
+	sn2.Tree, err = restic.FindTreeDirectory(ctx, repo, sn2.Tree, subfolder2)
+	if err != nil {
+		return err
+	}
+
 	c := &Comparer{
 		repo: repo,
 		opts: diffOptions,
@@ -381,7 +395,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 	}
 
 	if gopts.JSON {
-		enc := json.NewEncoder(gopts.stdout)
+		enc := json.NewEncoder(globalOptions.stdout)
 		c.printChange = func(change *Change) {
 			err := enc.Encode(change)
 			if err != nil {
@@ -415,7 +429,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 	updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added)
 
 	if gopts.JSON {
-		err := json.NewEncoder(gopts.stdout).Encode(stats)
+		err := json.NewEncoder(globalOptions.stdout).Encode(stats)
 		if err != nil {
 			Warnf("JSON encode failed: %v\n", err)
 		}
cmd/restic/cmd_diff_integration_test.go (new file, 193 lines)
@@ -0,0 +1,193 @@
+package main
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
+	buf, err := withCaptureStdout(func() error {
+		opts := DiffOptions{
+			ShowMetadata: false,
+		}
+		return runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
+	})
+	return buf.String(), err
+}
+
+func copyFile(dst string, src string) error {
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+
+	dstFile, err := os.Create(dst)
+	if err != nil {
+		// ignore subsequent errors
+		_ = srcFile.Close()
+		return err
+	}
+
+	_, err = io.Copy(dstFile, srcFile)
+	if err != nil {
+		// ignore subsequent errors
+		_ = srcFile.Close()
+		_ = dstFile.Close()
+		return err
+	}
+
+	err = srcFile.Close()
+	if err != nil {
+		// ignore subsequent errors
+		_ = dstFile.Close()
+		return err
+	}
+
+	err = dstFile.Close()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var diffOutputRegexPatterns = []string{
+	"-.+modfile",
+	"M.+modfile1",
+	"\\+.+modfile2",
+	"\\+.+modfile3",
+	"\\+.+modfile4",
+	"-.+submoddir",
+	"-.+submoddir.subsubmoddir",
+	"\\+.+submoddir2",
+	"\\+.+submoddir2.subsubmoddir",
+	"Files: +2 new, +1 removed, +1 changed",
+	"Dirs: +3 new, +2 removed",
+	"Data Blobs: +2 new, +1 removed",
+	"Added: +7[0-9]{2}\\.[0-9]{3} KiB",
+	"Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
+}
+
+func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
+	env, cleanup := withTestEnvironment(t)
+	testRunInit(t, env.gopts)
+
+	datadir := filepath.Join(env.base, "testdata")
+	testdir := filepath.Join(datadir, "testdir")
+	subtestdir := filepath.Join(testdir, "subtestdir")
+	testfile := filepath.Join(testdir, "testfile")
+
+	rtest.OK(t, os.Mkdir(testdir, 0755))
+	rtest.OK(t, os.Mkdir(subtestdir, 0755))
+	rtest.OK(t, appendRandomData(testfile, 256*1024))
+
+	moddir := filepath.Join(datadir, "moddir")
+	submoddir := filepath.Join(moddir, "submoddir")
+	subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
+	modfile := filepath.Join(moddir, "modfile")
+	rtest.OK(t, os.Mkdir(moddir, 0755))
+	rtest.OK(t, os.Mkdir(submoddir, 0755))
+	rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
+	rtest.OK(t, copyFile(modfile, testfile))
+	rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+
+	snapshots := make(map[string]struct{})
+	opts := BackupOptions{}
+	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+	snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+	rtest.OK(t, os.Rename(modfile, modfile+"3"))
+	rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
+	rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+	rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
+	rtest.OK(t, os.Mkdir(modfile+"4", 0755))
+
+	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+	_, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+	return env, cleanup, firstSnapshotID, secondSnapshotID
+}
+
+func TestDiff(t *testing.T) {
+	env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+	defer cleanup()
+
+	// quiet suppresses the diff output except for the summary
+	env.gopts.Quiet = false
+	_, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
+	rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
+
+	out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+	rtest.OK(t, err)
+
+	for _, pattern := range diffOutputRegexPatterns {
+		r, err := regexp.Compile(pattern)
+		rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
+		rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
+	}
+
+	// check quiet output
+	env.gopts.Quiet = true
+	outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+	rtest.OK(t, err)
+
+	rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
+}
+
+type typeSniffer struct {
+	MessageType string `json:"message_type"`
+}
+
+func TestDiffJSON(t *testing.T) {
+	env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+	defer cleanup()
+
+	// quiet suppresses the diff output except for the summary
+	env.gopts.Quiet = false
+	env.gopts.JSON = true
+	out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+	rtest.OK(t, err)
+
+	var stat DiffStatsContainer
+	var changes int
+
+	scanner := bufio.NewScanner(strings.NewReader(out))
+	for scanner.Scan() {
+		line := scanner.Text()
+		var sniffer typeSniffer
+		rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
+		switch sniffer.MessageType {
+		case "change":
+			changes++
+		case "statistics":
+			rtest.OK(t, json.Unmarshal([]byte(line), &stat))
+		default:
+			t.Fatalf("unexpected message type %v", sniffer.MessageType)
+		}
+	}
+	rtest.Equals(t, 9, changes)
+	rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+		stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+		stat.ChangedFiles == 1, "unexpected statistics")
+
+	// check quiet output
+	env.gopts.Quiet = true
+	outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+	rtest.OK(t, err)
+
+	stat = DiffStatsContainer{}
+	rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
+	rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+		stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+		stat.ChangedFiles == 1, "unexpected statistics")
+	rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
+}
@@ -24,9 +24,13 @@ single file is selected, it prints its contents to stdout. Folders are output
 as a tar (default) or zip file containing the contents of the specified folder.
 Pass "/" as file name to dump the whole snapshot as an archive file.
 
-The special snapshot "latest" can be used to use the latest snapshot in the
+The special snapshotID "latest" can be used to use the latest snapshot in the
 repository.
 
+To include the folder content at the root of the archive, you can use the
+"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
+snapshot.
+
 EXIT STATUS
 ===========
 
@@ -132,14 +136,14 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
 		}
 	}
 
-	sn, err := (&restic.SnapshotFilter{
+	sn, subfolder, err := (&restic.SnapshotFilter{
 		Hosts: opts.Hosts,
 		Paths: opts.Paths,
 		Tags:  opts.Tags,
@@ -148,7 +152,13 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
 		return errors.Fatalf("failed to find snapshot: %v", err)
 	}
 
-	err = repo.LoadIndex(ctx)
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	err = repo.LoadIndex(ctx, bar)
+	if err != nil {
+		return err
+	}
+
+	sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
 	if err != nil {
 		return err
 	}
@@ -51,6 +51,7 @@ type FindOptions struct {
 	PackID, ShowPackID bool
 	CaseInsensitive    bool
 	ListLong           bool
+	HumanReadable      bool
 	restic.SnapshotFilter
 }
 
@@ -69,6 +70,7 @@ func init() {
 	f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
 	f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
 	f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+	f.BoolVar(&findOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
 
 	initMultiSnapshotFilter(f, &findOptions.SnapshotFilter, true)
 }
@@ -104,12 +106,13 @@ func parseTime(str string) (time.Time, error) {
 }
 
 type statefulOutput struct {
 	ListLong bool
-	JSON     bool
-	inuse    bool
-	newsn    *restic.Snapshot
-	oldsn    *restic.Snapshot
-	hits     int
+	HumanReadable bool
+	JSON          bool
+	inuse         bool
+	newsn         *restic.Snapshot
+	oldsn         *restic.Snapshot
+	hits          int
 }
 
 func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
@@ -123,7 +126,6 @@ func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
 
 		// Make the following attributes disappear
 		Name               byte `json:"name,omitempty"`
-		Inode              byte `json:"inode,omitempty"`
 		ExtendedAttributes byte `json:"extended_attributes,omitempty"`
 		Device             byte `json:"device,omitempty"`
 		Content            byte `json:"content,omitempty"`
@@ -164,7 +166,7 @@ func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) {
 		s.oldsn = s.newsn
 		Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
 	}
-	Println(formatNode(path, node, s.ListLong))
+	Println(formatNode(path, node, s.ListLong, s.HumanReadable))
 }
 
 func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
@@ -501,7 +503,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
 	return packIDs
 }
 
-func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
+func (f *Finder) findObjectPack(id string, t restic.BlobType) {
 	idx := f.repo.Index()
 
 	rid, err := restic.ParseID(id)
@@ -524,13 +526,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
 	}
 }
 
-func (f *Finder) findObjectsPacks(ctx context.Context) {
+func (f *Finder) findObjectsPacks() {
 	for i := range f.blobIDs {
-		f.findObjectPack(ctx, i, restic.DataBlob)
+		f.findObjectPack(i, restic.DataBlob)
 	}
 
 	for i := range f.treeIDs {
-		f.findObjectPack(ctx, i, restic.TreeBlob)
+		f.findObjectPack(i, restic.TreeBlob)
 	}
 }
 
@@ -575,7 +577,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -586,15 +588,15 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
 	if err != nil {
 		return err
 	}
-
-	if err = repo.LoadIndex(ctx); err != nil {
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	if err = repo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
 
 	f := &Finder{
 		repo: repo,
 		pat:  pat,
-		out:  statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
+		out:  statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
 		ignoreTrees: restic.NewIDSet(),
 	}
 
@@ -618,7 +620,16 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
 		}
 	}
 
+	var filteredSnapshots []*restic.Snapshot
 	for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
+		filteredSnapshots = append(filteredSnapshots, sn)
+	}
+
+	sort.Slice(filteredSnapshots, func(i, j int) bool {
+		return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
+	})
+
+	for _, sn := range filteredSnapshots {
 		if f.blobIDs != nil || f.treeIDs != nil {
 			if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
 				return err
@@ -632,7 +643,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
 	f.out.Finish()
 
 	if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
-		f.findObjectsPacks(ctx)
+		f.findObjectsPacks()
 	}
 
 	return nil
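Note: the new --human-readable flag only changes how formatNode renders sizes in the normal listing; the JSON output keeps raw byte counts. As a rough illustration of what "human readable" means here, a hypothetical formatter, not restic's actual implementation (which lives in its ui helpers):

package main

import "fmt"

// humanSize is a hypothetical stand-in for the size formatting enabled by
// --human-readable: it scales a byte count into binary units.
func humanSize(b uint64) string {
	units := []string{"B", "KiB", "MiB", "GiB", "TiB"}
	size := float64(b)
	i := 0
	for size >= 1024 && i < len(units)-1 {
		size /= 1024
		i++
	}
	return fmt.Sprintf("%.3f %s", size, units[i])
}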
cmd/restic/cmd_find_integration_test.go (new file, 87 lines)
@@ -0,0 +1,87 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"strings"
+	"testing"
+	"time"
+
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
+	buf, err := withCaptureStdout(func() error {
+		gopts.JSON = wantJSON
+
+		opts := FindOptions{}
+		return runFind(context.TODO(), opts, gopts, []string{pattern})
+	})
+	rtest.OK(t, err)
+	return buf.Bytes()
+}
+
+func TestFind(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+
+	datafile := testSetupBackupData(t, env)
+	opts := BackupOptions{}
+
+	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+	testRunCheck(t, env.gopts)
+
+	results := testRunFind(t, false, env.gopts, "unexistingfile")
+	rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+
+	results = testRunFind(t, false, env.gopts, "testfile")
+	lines := strings.Split(string(results), "\n")
+	rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+
+	results = testRunFind(t, false, env.gopts, "testfile*")
+	lines = strings.Split(string(results), "\n")
+	rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+}
+
+type testMatch struct {
+	Path        string    `json:"path,omitempty"`
+	Permissions string    `json:"permissions,omitempty"`
+	Size        uint64    `json:"size,omitempty"`
+	Date        time.Time `json:"date,omitempty"`
+	UID         uint32    `json:"uid,omitempty"`
+	GID         uint32    `json:"gid,omitempty"`
+}
+
+type testMatches struct {
+	Hits       int         `json:"hits,omitempty"`
+	SnapshotID string      `json:"snapshot,omitempty"`
+	Matches    []testMatch `json:"matches,omitempty"`
+}
+
+func TestFindJSON(t *testing.T) {
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+
+	datafile := testSetupBackupData(t, env)
+	opts := BackupOptions{}
+
+	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+	testRunCheck(t, env.gopts)
+
+	results := testRunFind(t, true, env.gopts, "unexistingfile")
+	matches := []testMatches{}
+	rtest.OK(t, json.Unmarshal(results, &matches))
+	rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+
+	results = testRunFind(t, true, env.gopts, "testfile")
+	rtest.OK(t, json.Unmarshal(results, &matches))
+	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+	rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+	rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+
+	results = testRunFind(t, true, env.gopts, "testfile*")
+	rtest.OK(t, json.Unmarshal(results, &matches))
+	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+	rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+	rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+}
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"github.com/restic/restic/internal/errors"
|
"github.com/restic/restic/internal/errors"
|
||||||
"github.com/restic/restic/internal/restic"
|
"github.com/restic/restic/internal/restic"
|
||||||
@@ -36,14 +37,49 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ForgetPolicyCount int
|
||||||
|
|
||||||
|
var ErrNegativePolicyCount = errors.New("negative values not allowed, use 'unlimited' instead")
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) Set(s string) error {
|
||||||
|
switch s {
|
||||||
|
case "unlimited":
|
||||||
|
*c = -1
|
||||||
|
default:
|
||||||
|
val, err := strconv.ParseInt(s, 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if val < 0 {
|
||||||
|
return ErrNegativePolicyCount
|
||||||
|
}
|
||||||
|
*c = ForgetPolicyCount(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) String() string {
|
||||||
|
switch *c {
|
||||||
|
case -1:
|
||||||
|
return "unlimited"
|
||||||
|
default:
|
||||||
|
return strconv.FormatInt(int64(*c), 10)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ForgetPolicyCount) Type() string {
|
||||||
|
return "n"
|
||||||
|
}
|
||||||
|
|
||||||
// ForgetOptions collects all options for the forget command.
|
// ForgetOptions collects all options for the forget command.
|
||||||
type ForgetOptions struct {
|
type ForgetOptions struct {
|
||||||
Last int
|
Last ForgetPolicyCount
|
||||||
Hourly int
|
Hourly ForgetPolicyCount
|
||||||
Daily int
|
Daily ForgetPolicyCount
|
||||||
Weekly int
|
Weekly ForgetPolicyCount
|
||||||
Monthly int
|
Monthly ForgetPolicyCount
|
||||||
Yearly int
|
Yearly ForgetPolicyCount
|
||||||
Within restic.Duration
|
Within restic.Duration
|
||||||
WithinHourly restic.Duration
|
WithinHourly restic.Duration
|
||||||
WithinDaily restic.Duration
|
WithinDaily restic.Duration
|
||||||
@@ -56,7 +92,7 @@ type ForgetOptions struct {
|
|||||||
Compact bool
|
Compact bool
|
||||||
|
|
||||||
// Grouping
|
// Grouping
|
||||||
GroupBy string
|
GroupBy restic.SnapshotGroupByOptions
|
||||||
DryRun bool
|
DryRun bool
|
||||||
Prune bool
|
Prune bool
|
||||||
}
|
}
|
||||||
@@ -67,12 +103,12 @@ func init() {
|
|||||||
cmdRoot.AddCommand(cmdForget)
|
cmdRoot.AddCommand(cmdForget)
|
||||||
|
|
||||||
f := cmdForget.Flags()
|
f := cmdForget.Flags()
|
||||||
f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
|
f.VarP(&forgetOptions.Last, "keep-last", "l", "keep the last `n` snapshots (use 'unlimited' to keep all snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
|
f.VarP(&forgetOptions.Hourly, "keep-hourly", "H", "keep the last `n` hourly snapshots (use 'unlimited' to keep all hourly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
|
f.VarP(&forgetOptions.Daily, "keep-daily", "d", "keep the last `n` daily snapshots (use 'unlimited' to keep all daily snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
|
f.VarP(&forgetOptions.Weekly, "keep-weekly", "w", "keep the last `n` weekly snapshots (use 'unlimited' to keep all weekly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
|
f.VarP(&forgetOptions.Monthly, "keep-monthly", "m", "keep the last `n` monthly snapshots (use 'unlimited' to keep all monthly snapshots)")
|
||||||
f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
|
f.VarP(&forgetOptions.Yearly, "keep-yearly", "y", "keep the last `n` yearly snapshots (use 'unlimited' to keep all yearly snapshots)")
|
||||||
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
|
||||||
@@ -90,8 +126,8 @@ func init() {
 	}
 
 	f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact output format")
-	f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
+	forgetOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+	f.VarP(&forgetOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
 	f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
 	f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
 
@@ -99,8 +135,29 @@ func init() {
 	addPruneOptions(cmdForget)
 }
 
+func verifyForgetOptions(opts *ForgetOptions) error {
+	if opts.Last < -1 || opts.Hourly < -1 || opts.Daily < -1 || opts.Weekly < -1 ||
+		opts.Monthly < -1 || opts.Yearly < -1 {
+		return errors.Fatal("negative values other than -1 are not allowed for --keep-*")
+	}
+
+	for _, d := range []restic.Duration{opts.Within, opts.WithinHourly, opts.WithinDaily,
+		opts.WithinMonthly, opts.WithinWeekly, opts.WithinYearly} {
+		if d.Hours < 0 || d.Days < 0 || d.Months < 0 || d.Years < 0 {
+			return errors.Fatal("durations containing negative values are not allowed for --keep-within*")
+		}
+	}
+
+	return nil
+}
+
 func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
-	err := verifyPruneOptions(&pruneOptions)
+	err := verifyForgetOptions(&opts)
+	if err != nil {
+		return err
+	}
+
+	err = verifyPruneOptions(&pruneOptions)
 	if err != nil {
 		return err
 	}
@@ -116,7 +173,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
 
 	if !opts.DryRun || !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepoExclusive(ctx, repo)
+		lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -144,12 +201,12 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
 	}
 
 	policy := restic.ExpirePolicy{
-		Last:          opts.Last,
-		Hourly:        opts.Hourly,
-		Daily:         opts.Daily,
-		Weekly:        opts.Weekly,
-		Monthly:       opts.Monthly,
-		Yearly:        opts.Yearly,
+		Last:          int(opts.Last),
+		Hourly:        int(opts.Hourly),
+		Daily:         int(opts.Daily),
+		Weekly:        int(opts.Weekly),
+		Monthly:       int(opts.Monthly),
+		Yearly:        int(opts.Yearly),
 		Within:        opts.Within,
 		WithinHourly:  opts.WithinHourly,
 		WithinDaily:   opts.WithinDaily,
@@ -172,7 +229,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
 
 	for k, snapshotGroup := range snapshotGroups {
 		if gopts.Verbose >= 1 && !gopts.JSON {
-			err = PrintSnapshotGroupHeader(gopts.stdout, k)
+			err = PrintSnapshotGroupHeader(globalOptions.stdout, k)
 			if err != nil {
 				return err
 			}
@@ -229,7 +286,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
 	}
 
 	if gopts.JSON && len(jsonGroups) > 0 {
-		err = printJSONForget(gopts.stdout, jsonGroups)
+		err = printJSONForget(globalOptions.stdout, jsonGroups)
 		if err != nil {
 			return err
 		}
cmd/restic/cmd_forget_integration_test.go (new file, 13 lines):

package main

import (
	"context"
	"testing"

	rtest "github.com/restic/restic/internal/test"
)

func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
	opts := ForgetOptions{}
	rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
}
cmd/restic/cmd_forget_test.go (new file, 94 lines):

package main

import (
	"testing"

	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

func TestForgetPolicyValues(t *testing.T) {
	testCases := []struct {
		input string
		value ForgetPolicyCount
		err   string
	}{
		{"0", ForgetPolicyCount(0), ""},
		{"1", ForgetPolicyCount(1), ""},
		{"unlimited", ForgetPolicyCount(-1), ""},
		{"", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"\": invalid syntax"},
		{"-1", ForgetPolicyCount(0), ErrNegativePolicyCount.Error()},
		{"abc", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"abc\": invalid syntax"},
	}
	for _, testCase := range testCases {
		t.Run("", func(t *testing.T) {
			var count ForgetPolicyCount
			err := count.Set(testCase.input)

			if testCase.err != "" {
				rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
				rtest.Equals(t, testCase.err, err.Error())
			} else {
				rtest.Assert(t, err == nil, "expected no error for input %+v, got %v", testCase.input, err)
				rtest.Equals(t, testCase.value, count)
				rtest.Equals(t, testCase.input, count.String())
			}
		})
	}
}

func TestForgetOptionValues(t *testing.T) {
	const negValErrorMsg = "Fatal: negative values other than -1 are not allowed for --keep-*"
	const negDurationValErrorMsg = "Fatal: durations containing negative values are not allowed for --keep-within*"
	testCases := []struct {
		input    ForgetOptions
		errorMsg string
	}{
		{ForgetOptions{Last: 1}, ""},
		{ForgetOptions{Hourly: 1}, ""},
		{ForgetOptions{Daily: 1}, ""},
		{ForgetOptions{Weekly: 1}, ""},
		{ForgetOptions{Monthly: 1}, ""},
		{ForgetOptions{Yearly: 1}, ""},
		{ForgetOptions{Last: 0}, ""},
		{ForgetOptions{Hourly: 0}, ""},
		{ForgetOptions{Daily: 0}, ""},
		{ForgetOptions{Weekly: 0}, ""},
		{ForgetOptions{Monthly: 0}, ""},
		{ForgetOptions{Yearly: 0}, ""},
		{ForgetOptions{Last: -1}, ""},
		{ForgetOptions{Hourly: -1}, ""},
		{ForgetOptions{Daily: -1}, ""},
		{ForgetOptions{Weekly: -1}, ""},
		{ForgetOptions{Monthly: -1}, ""},
		{ForgetOptions{Yearly: -1}, ""},
		{ForgetOptions{Last: -2}, negValErrorMsg},
		{ForgetOptions{Hourly: -2}, negValErrorMsg},
		{ForgetOptions{Daily: -2}, negValErrorMsg},
		{ForgetOptions{Weekly: -2}, negValErrorMsg},
		{ForgetOptions{Monthly: -2}, negValErrorMsg},
		{ForgetOptions{Yearly: -2}, negValErrorMsg},
		{ForgetOptions{Within: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
		{ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
		{ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
		{ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
		{ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
		{ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
		{ForgetOptions{Within: restic.ParseDurationOrPanic("-1y2m3d3h")}, negDurationValErrorMsg},
		{ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y-2m3d3h")}, negDurationValErrorMsg},
		{ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m-3d3h")}, negDurationValErrorMsg},
		{ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d-3h")}, negDurationValErrorMsg},
		{ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("-2y4m6d8h")}, negDurationValErrorMsg},
		{ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y-4m6d8h")}, negDurationValErrorMsg},
	}

	for _, testCase := range testCases {
		err := verifyForgetOptions(&testCase.input)
		if testCase.errorMsg != "" {
			rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
			rtest.Equals(t, testCase.errorMsg, err.Error())
		} else {
			rtest.Assert(t, err == nil, "expected no error for input %+v", testCase.input)
		}
	}
}
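Note: ForgetPolicyCount and ErrNegativePolicyCount, which these tests exercise, are defined elsewhere in cmd_forget.go and are not part of this excerpt. As an illustration only, a pflag-style value with the behaviour the tests imply ("unlimited" maps to -1, plain non-negative integers parse as-is, other negative input is rejected) could be sketched as follows; the names and details here are assumptions, not restic's actual implementation.

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// ErrNegativePolicyCount stands in for the sentinel error the tests expect
// when a --keep-* value is negative (other than the special "unlimited" case).
var ErrNegativePolicyCount = errors.New("negative values not allowed, use 'unlimited' instead")

// ForgetPolicyCount sketches an integer flag value that also accepts the
// string "unlimited", stored internally as -1.
type ForgetPolicyCount int64

// Set parses the command-line string: "unlimited" -> -1, a non-negative
// integer -> its value, any other negative input -> ErrNegativePolicyCount.
func (c *ForgetPolicyCount) Set(s string) error {
	if s == "unlimited" {
		*c = -1
		return nil
	}
	v, err := strconv.ParseInt(s, 10, 0)
	if err != nil {
		return err
	}
	if v < 0 {
		return ErrNegativePolicyCount
	}
	*c = ForgetPolicyCount(v)
	return nil
}

// String renders the value back, so that Set and String round-trip the input.
func (c *ForgetPolicyCount) String() string {
	if *c == -1 {
		return "unlimited"
	}
	return strconv.FormatInt(int64(*c), 10)
}

// Type is part of the pflag.Value interface.
func (c *ForgetPolicyCount) Type() string { return "n" }

func main() {
	var c ForgetPolicyCount
	for _, in := range []string{"3", "unlimited", "-2"} {
		err := c.Set(in)
		fmt.Printf("input %q -> value %s, err: %v\n", in, c.String(), err)
	}
}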
@@ -63,26 +63,38 @@ func writeManpages(dir string) error {
 }
 
 func writeBashCompletion(file string) error {
-	Verbosef("writing bash completion file to %v\n", file)
+	if stdoutIsTerminal() {
+		Verbosef("writing bash completion file to %v\n", file)
+	}
 	return cmdRoot.GenBashCompletionFile(file)
 }
 
 func writeFishCompletion(file string) error {
-	Verbosef("writing fish completion file to %v\n", file)
+	if stdoutIsTerminal() {
+		Verbosef("writing fish completion file to %v\n", file)
+	}
 	return cmdRoot.GenFishCompletionFile(file, true)
 }
 
 func writeZSHCompletion(file string) error {
-	Verbosef("writing zsh completion file to %v\n", file)
+	if stdoutIsTerminal() {
+		Verbosef("writing zsh completion file to %v\n", file)
+	}
 	return cmdRoot.GenZshCompletionFile(file)
 }
 
 func writePowerShellCompletion(file string) error {
-	Verbosef("writing powershell completion file to %v\n", file)
+	if stdoutIsTerminal() {
+		Verbosef("writing powershell completion file to %v\n", file)
+	}
 	return cmdRoot.GenPowerShellCompletionFile(file)
 }
 
-func runGenerate(cmd *cobra.Command, args []string) error {
+func runGenerate(_ *cobra.Command, args []string) error {
+	if len(args) > 0 {
+		return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
+	}
+
 	if genOpts.ManDir != "" {
 		err := writeManpages(genOpts.ManDir)
 		if err != nil {
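Note: the stdoutIsTerminal helper called above is not shown in this excerpt. A generic stand-in built on golang.org/x/term (an assumption; restic may implement the check differently) would look like this sketch, whose point is simply to suppress chatty status output when stdout is redirected.

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

// stdoutIsTerminal reports whether stdout is attached to an interactive
// terminal, so status messages can be skipped when output is piped to a file.
func stdoutIsTerminal() bool {
	return term.IsTerminal(int(os.Stdout.Fd()))
}

func main() {
	if stdoutIsTerminal() {
		fmt.Println("writing completion file ...") // only shown for interactive runs
	}
	// the completion file itself would be generated here in either case
}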
@@ -50,6 +50,10 @@ func init() {
 }
 
 func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
+	if len(args) > 0 {
+		return errors.Fatal("the init command expects no arguments, only options - please see `restic help init` for usage and flags")
+	}
+
 	var version uint
 	if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
 		version = restic.MaxRepoVersion
@@ -71,7 +75,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
 		return err
 	}
 
-	repo, err := ReadRepo(gopts)
+	gopts.Repo, err = ReadRepo(gopts)
 	if err != nil {
 		return err
 	}
@@ -83,9 +87,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
 		return err
 	}
 
-	be, err := create(ctx, repo, gopts.extended)
+	be, err := create(ctx, gopts.Repo, gopts, gopts.extended)
 	if err != nil {
-		return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+		return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
 	}
 
 	s, err := repository.New(be, repository.Options{
@@ -93,16 +97,21 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
 		PackSize: gopts.PackSize * 1024 * 1024,
 	})
 	if err != nil {
-		return err
+		return errors.Fatal(err.Error())
 	}
 
 	err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
 	if err != nil {
-		return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+		return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
 	}
 
 	if !gopts.JSON {
-		Verbosef("created restic repository %v at %s\n", s.Config().ID[:10], location.StripPassword(gopts.Repo))
+		Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.backends, gopts.Repo))
+		if opts.CopyChunkerParameters && chunkerPolynomial != nil {
+			Verbosef(" with chunker parameters copied from secondary repository\n")
+		} else {
+			Verbosef("\n")
+		}
 		Verbosef("\n")
 		Verbosef("Please note that knowledge of your password is required to access\n")
 		Verbosef("the repository. Losing your password means that your data is\n")
@@ -112,9 +121,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
 		status := initSuccess{
 			MessageType: "initialized",
 			ID:          s.Config().ID,
-			Repository:  location.StripPassword(gopts.Repo),
+			Repository:  location.StripPassword(gopts.backends, gopts.Repo),
 		}
-		return json.NewEncoder(gopts.stdout).Encode(status)
+		return json.NewEncoder(globalOptions.stdout).Encode(status)
 	}
 
 	return nil
cmd/restic/cmd_init_integration_test.go (new file, 49 lines):

package main

import (
	"context"
	"testing"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

func testRunInit(t testing.TB, opts GlobalOptions) {
	repository.TestUseLowSecurityKDFParameters(t)
	restic.TestDisableCheckPolynomial(t)
	restic.TestSetLockTimeout(t, 0)

	rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
	t.Logf("repository initialized at %v", opts.Repo)
}

func TestInitCopyChunkerParams(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testRunInit(t, env2.gopts)

	initOpts := InitOptions{
		secondaryRepoOptions: secondaryRepoOptions{
			Repo:     env2.gopts.Repo,
			password: env2.gopts.password,
		},
	}
	rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")

	initOpts.CopyChunkerParameters = true
	rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))

	repo, err := OpenRepository(context.TODO(), env.gopts)
	rtest.OK(t, err)

	otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
	rtest.OK(t, err)

	rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
		"expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
		otherRepo.Config().ChunkerPolynomial)
}
@@ -212,15 +212,18 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
 
 	switch args[0] {
 	case "list":
-		lock, ctx, err := lockRepo(ctx, repo)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
+		if !gopts.NoLock {
+			var lock *restic.Lock
+			lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
+			defer unlockRepo(lock)
+			if err != nil {
+				return err
+			}
 		}
 
 		return listKeys(ctx, repo, gopts)
 	case "add":
-		lock, ctx, err := lockRepo(ctx, repo)
+		lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -228,7 +231,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
 
 		return addKey(ctx, repo, gopts)
 	case "remove":
-		lock, ctx, err := lockRepoExclusive(ctx, repo)
+		lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -241,7 +244,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
 
 		return deleteKey(ctx, repo, id)
 	case "passwd":
-		lock, ctx, err := lockRepoExclusive(ctx, repo)
+		lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
cmd/restic/cmd_key_integration_test.go (new file, 145 lines):

package main

import (
	"bufio"
	"context"
	"regexp"
	"testing"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
	buf, err := withCaptureStdout(func() error {
		return runKey(context.TODO(), gopts, []string{"list"})
	})
	rtest.OK(t, err)

	scanner := bufio.NewScanner(buf)
	exp := regexp.MustCompile(`^ ([a-f0-9]+) `)

	IDs := []string{}
	for scanner.Scan() {
		if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
			IDs = append(IDs, id[1])
		}
	}

	return IDs
}

func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
}

func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
	testKeyNewPassword = "john's geheimnis"
	defer func() {
		testKeyNewPassword = ""
		keyUsername = ""
		keyHostname = ""
	}()

	rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))

	t.Log("adding key for john@example.com")
	rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))

	repo, err := OpenRepository(context.TODO(), gopts)
	rtest.OK(t, err)
	key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
	rtest.OK(t, err)

	rtest.Equals(t, "john", key.Username)
	rtest.Equals(t, "example.com", key.Hostname)
}

func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
}

func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
	t.Logf("remove %d keys: %q\n", len(IDs), IDs)
	for _, id := range IDs {
		rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
	}
}

func TestKeyAddRemove(t *testing.T) {
	passwordList := []string{
		"OnnyiasyatvodsEvVodyawit",
		"raicneirvOjEfEigonOmLasOd",
	}

	env, cleanup := withTestEnvironment(t)
	// must list keys more than once
	env.gopts.backendTestHook = nil
	defer cleanup()

	testRunInit(t, env.gopts)

	testRunKeyPasswd(t, "geheim2", env.gopts)
	env.gopts.password = "geheim2"
	t.Logf("changed password to %q", env.gopts.password)

	for _, newPassword := range passwordList {
		testRunKeyAddNewKey(t, newPassword, env.gopts)
		t.Logf("added new password %q", newPassword)
		env.gopts.password = newPassword
		testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
	}

	env.gopts.password = passwordList[len(passwordList)-1]
	t.Logf("testing access with last password %q\n", env.gopts.password)
	rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)

	testRunKeyAddNewKeyUserHost(t, env.gopts)
}

type emptySaveBackend struct {
	restic.Backend
}

func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
	return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
}

func TestKeyProblems(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)
	env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
		return &emptySaveBackend{r}, nil
	}

	testKeyNewPassword = "geheim2"
	defer func() {
		testKeyNewPassword = ""
	}()

	err := runKey(context.TODO(), env.gopts, []string{"passwd"})
	t.Log(err)
	rtest.Assert(t, err != nil, "expected passwd change to fail")

	err = runKey(context.TODO(), env.gopts, []string{"add"})
	t.Log(err)
	rtest.Assert(t, err != nil, "expected key adding to fail")

	t.Logf("testing access with initial password %q\n", env.gopts.password)
	rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)
}
@@ -31,19 +31,19 @@ func init() {
 	cmdRoot.AddCommand(cmdList)
 }
 
-func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args []string) error {
+func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
 	if len(args) != 1 {
 		return errors.Fatal("type not specified, usage: " + cmd.Use)
 	}
 
-	repo, err := OpenRepository(ctx, opts)
+	repo, err := OpenRepository(ctx, gopts)
 	if err != nil {
 		return err
 	}
 
-	if !opts.NoLock && args[0] != "locks" {
+	if !gopts.NoLock && args[0] != "locks" {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
@@ -63,7 +63,7 @@ func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args [
 	case "locks":
 		t = restic.LockFile
 	case "blobs":
-		return index.ForAllIndexes(ctx, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
+		return index.ForAllIndexes(ctx, repo.Backend(), repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
			if err != nil {
				return err
			}
cmd/restic/cmd_list_integration_test.go (new file, 44 lines):

package main

import (
	"bufio"
	"context"
	"io"
	"testing"

	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
	buf, err := withCaptureStdout(func() error {
		return runList(context.TODO(), cmdList, opts, []string{tpe})
	})
	rtest.OK(t, err)
	return parseIDsFromReader(t, buf)
}

func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
	t.Helper()
	IDs := restic.IDs{}
	sc := bufio.NewScanner(rd)

	for sc.Scan() {
		id, err := restic.ParseID(sc.Text())
		if err != nil {
			t.Logf("parse id %v: %v", sc.Text(), err)
			continue
		}

		IDs = append(IDs, id)
	}

	return IDs
}

func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs {
	t.Helper()
	snapshotIDs := testRunList(t, "snapshots", opts)
	rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshot, got %v", expected, snapshotIDs)
	return snapshotIDs
}
@@ -50,7 +50,8 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
 type LsOptions struct {
 	ListLong bool
 	restic.SnapshotFilter
 	Recursive bool
+	HumanReadable bool
 }
 
 var lsOptions LsOptions
@@ -62,6 +63,7 @@ func init() {
 	initSingleSnapshotFilter(flags, &lsOptions.SnapshotFilter)
 	flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
 	flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
+	flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
 }
 
 type lsSnapshot struct {
@@ -85,6 +87,7 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error {
 		ModTime    time.Time `json:"mtime,omitempty"`
 		AccessTime time.Time `json:"atime,omitempty"`
 		ChangeTime time.Time `json:"ctime,omitempty"`
+		Inode      uint64    `json:"inode,omitempty"`
 		StructType string    `json:"struct_type"` // "node"
 
 		size uint64 // Target for Size pointer.
@@ -100,6 +103,7 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error {
 		ModTime:    node.ModTime,
 		AccessTime: node.AccessTime,
 		ChangeTime: node.ChangeTime,
+		Inode:      node.Inode,
 		StructType: "node",
 	}
 	// Always print size for regular files, even when empty,
@@ -171,7 +175,8 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
 		return err
 	}
 
-	if err = repo.LoadIndex(ctx); err != nil {
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	if err = repo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
 
@@ -181,7 +186,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
 	)
 
 	if gopts.JSON {
-		enc := json.NewEncoder(gopts.stdout)
+		enc := json.NewEncoder(globalOptions.stdout)
 
 		printSnapshot = func(sn *restic.Snapshot) {
 			err := enc.Encode(lsSnapshot{
@@ -206,11 +211,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
 			Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
 		}
 		printNode = func(path string, node *restic.Node) {
-			Printf("%s\n", formatNode(path, node, lsOptions.ListLong))
+			Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))
 		}
 	}
 
-	sn, err := (&restic.SnapshotFilter{
+	sn, subfolder, err := (&restic.SnapshotFilter{
 		Hosts: opts.Hosts,
 		Paths: opts.Paths,
 		Tags:  opts.Tags,
@@ -219,6 +224,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
 		return err
 	}
 
+	sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+	if err != nil {
+		return err
+	}
+
 	printSnapshot(sn)
 
 	err = walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
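Note: the new --human-readable flag is simply passed through to formatNode, whose implementation is not part of this diff. Purely as an illustration of what such an option typically does, a binary-unit size formatter could look like the following sketch; restic's actual formatting may differ.

package main

import "fmt"

// humanReadableSize scales a byte count into KiB/MiB/GiB/... so that
// ls-style output stays short; this is an illustrative helper only.
func humanReadableSize(size uint64) string {
	units := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB"}
	value := float64(size)
	i := 0
	for value >= 1024 && i < len(units)-1 {
		value /= 1024
		i++
	}
	if i == 0 {
		return fmt.Sprintf("%d %s", size, units[0])
	}
	return fmt.Sprintf("%.1f %s", value, units[i])
}

func main() {
	fmt.Println(humanReadableSize(532))         // 532 B
	fmt.Println(humanReadableSize(5 * 1 << 20)) // 5.0 MiB
}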
cmd/restic/cmd_ls_integration_test.go (new file, 19 lines):

package main

import (
	"context"
	"strings"
	"testing"

	rtest "github.com/restic/restic/internal/test"
)

func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
	buf, err := withCaptureStdout(func() error {
		gopts.Quiet = true
		opts := LsOptions{}
		return runLs(context.TODO(), opts, gopts, []string{snapshotID})
	})
	rtest.OK(t, err)
	return strings.Split(buf.String(), "\n")
}
@@ -122,7 +122,7 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a
 		return err
 	}
 
-	lock, ctx, err := lockRepoExclusive(ctx, repo)
+	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 	defer unlockRepo(lock)
 	if err != nil {
 		return err
@@ -123,14 +123,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
 
 	if !gopts.NoLock {
 		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo)
+		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 		defer unlockRepo(lock)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = repo.LoadIndex(ctx)
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	err = repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return err
 	}
@@ -12,6 +12,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
@@ -63,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
 	rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
 }
 
-func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+func testRunUmount(t testing.TB, dir string) {
 	var err error
 	for i := 0; i < mountWait; i++ {
 		if err = umount(dir); err == nil {
@@ -94,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
 	go testRunMount(t, global, mountpoint, &wg)
 	waitForMount(t, mountpoint)
 	defer wg.Wait()
-	defer testRunUmount(t, global, mountpoint)
+	defer testRunUmount(t, mountpoint)
 
 	if !snapshotsDirExists(t, mountpoint) {
 		t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -159,6 +160,11 @@ func TestMount(t *testing.T) {
 		t.Skip("Skipping fuse tests")
 	}
 
+	debugEnabled := debug.TestLogToStderr(t)
+	if debugEnabled {
+		defer debug.TestDisableLog(t)
+	}
+
 	env, cleanup := withTestEnvironment(t)
 	// must list snapshots more than once
 	env.gopts.backendTestHook = nil
@@ -3,6 +3,7 @@ package main
 import (
 	"context"
 	"math"
+	"runtime"
 	"sort"
 	"strconv"
 	"strings"
@@ -80,7 +81,7 @@ func addPruneOptions(c *cobra.Command) {
 func verifyPruneOptions(opts *PruneOptions) error {
 	opts.MaxRepackBytes = math.MaxUint64
 	if len(opts.MaxRepackSize) > 0 {
-		size, err := parseSizeStr(opts.MaxRepackSize)
+		size, err := ui.ParseBytes(opts.MaxRepackSize)
 		if err != nil {
 			return err
 		}
@@ -123,7 +124,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
		}

	default:
-		size, err := parseSizeStr(maxUnused)
+		size, err := ui.ParseBytes(maxUnused)
		if err != nil {
			return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
		}
@@ -167,7 +168,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
 		opts.unsafeRecovery = true
 	}
 
-	lock, ctx, err := lockRepoExclusive(ctx, repo)
+	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 	defer unlockRepo(lock)
 	if err != nil {
 		return err
@@ -186,7 +187,8 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
 
 	Verbosef("loading indexes...\n")
 	// loading the index before the snapshots is ok, as we use an exclusive lock here
-	err := repo.LoadIndex(ctx)
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	err := repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return err
 	}
@@ -205,6 +207,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
 		return err
 	}
 
+	// Trigger GC to reset garbage collection threshold
+	runtime.GC()
+
 	return doPrune(ctx, opts, gopts, repo, plan)
 }
 
@@ -488,7 +493,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi
 			// Pack size does not fit and pack is needed => error
 			// If the pack is not needed, this is no error, the pack can
 			// and will be simply removed, see below.
-			Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic rebuild-index'.\n",
+			Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
 				id.Str(), p.unusedSize+p.usedSize, packSize)
 			return errorSizeNotMatching
 		}
@@ -729,7 +734,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
 		_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
 		bar.Done()
 		if err != nil {
-			return errors.Fatalf("%s", err)
+			return errors.Fatal(err.Error())
 		}
 
 		// Also remove repacked packs
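Note: verifyPruneOptions now delegates plain byte sizes to ui.ParseBytes, while the percentage and "unlimited" forms of --max-unused (used by the prune tests below) are handled in the surrounding switch, which is only partially shown here. A self-contained sketch of that three-way parsing, with local stand-ins instead of restic's helpers, might look like this; it is illustrative only and the real implementation may differ.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMaxUnused illustrates the shape of --max-unused handling:
// "unlimited", a percentage such as "5%", or a plain byte count.
func parseMaxUnused(s string) (unlimited bool, percent float64, bytes uint64, err error) {
	switch {
	case s == "unlimited":
		return true, 0, 0, nil
	case strings.HasSuffix(s, "%"):
		percent, err = strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64)
		return false, percent, 0, err
	default:
		// a real implementation would also accept size suffixes such as "500M"
		bytes, err = strconv.ParseUint(s, 10, 64)
		return false, 0, bytes, err
	}
}

func main() {
	for _, in := range []string{"unlimited", "5%", "1048576"} {
		unlimited, p, b, err := parseMaxUnused(in)
		fmt.Printf("%q -> unlimited=%v percent=%v bytes=%v err=%v\n", in, unlimited, p, b, err)
	}
}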
cmd/restic/cmd_prune_integration_test.go (new file, 221 lines):

package main

import (
	"context"
	"encoding/json"
	"path/filepath"
	"testing"

	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
	oldHook := gopts.backendTestHook
	gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
	defer func() {
		gopts.backendTestHook = oldHook
	}()
	rtest.OK(t, runPrune(context.TODO(), opts, gopts))
}

func TestPrune(t *testing.T) {
	testPruneVariants(t, false)
	testPruneVariants(t, true)
}

func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
	suffix := ""
	if unsafeNoSpaceRecovery {
		suffix = "-recovery"
	}
	t.Run("0"+suffix, func(t *testing.T) {
		opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
		checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
		testPrune(t, opts, checkOpts)
	})

	t.Run("50"+suffix, func(t *testing.T) {
		opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
		checkOpts := CheckOptions{ReadData: true}
		testPrune(t, opts, checkOpts)
	})

	t.Run("unlimited"+suffix, func(t *testing.T) {
		opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
		checkOpts := CheckOptions{ReadData: true}
		testPrune(t, opts, checkOpts)
	})

	t.Run("CachableOnly"+suffix, func(t *testing.T) {
		opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
		checkOpts := CheckOptions{ReadData: true}
		testPrune(t, opts, checkOpts)
	})
	t.Run("Small", func(t *testing.T) {
		opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
		checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
		testPrune(t, opts, checkOpts)
	})
}

func createPrunableRepo(t *testing.T, env *testEnvironment) {
	testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]

	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testListSnapshots(t, env.gopts, 3)

	testRunForgetJSON(t, env.gopts)
	testRunForget(t, env.gopts, firstSnapshot.String())
}

func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
	buf, err := withCaptureStdout(func() error {
		gopts.JSON = true
		opts := ForgetOptions{
			DryRun: true,
			Last:   1,
		}
		return runForget(context.TODO(), opts, gopts, args)
	})
	rtest.OK(t, err)

	var forgets []*ForgetGroup
	rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))

	rtest.Assert(t, len(forgets) == 1,
		"Expected 1 snapshot group, got %v", len(forgets))
	rtest.Assert(t, len(forgets[0].Keep) == 1,
		"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
	rtest.Assert(t, len(forgets[0].Remove) == 2,
		"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
}

func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	createPrunableRepo(t, env)
	testRunPrune(t, env.gopts, pruneOpts)
	rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
}

var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}

func TestPruneWithDamagedRepository(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	testRunInit(t, env.gopts)

	rtest.SetupTarTestFixture(t, env.testdata, datafile)
	opts := BackupOptions{}

	// create and delete snapshot to create unused blobs
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
	testRunForget(t, env.gopts, firstSnapshot.String())

	oldPacks := listPacks(env.gopts, t)

	// create new snapshot, but lose all data
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testListSnapshots(t, env.gopts, 1)
	removePacksExcept(env.gopts, t, oldPacks, false)

	oldHook := env.gopts.backendTestHook
	env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
	defer func() {
		env.gopts.backendTestHook = oldHook
	}()
	// prune should fail
	rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
		"prune should have reported index not complete error")
}

// Test repos for edge cases
func TestEdgeCaseRepos(t *testing.T) {
	opts := CheckOptions{}

	// repo where index is completely missing
	// => check and prune should fail
	t.Run("no-index", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where an existing and used blob is missing from the index
	// => check and prune should fail
	t.Run("index-missing-blob", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where a blob is missing
	// => check and prune should fail
	t.Run("missing-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where blobs which are not needed are missing or in invalid pack files
	// => check should fail and prune should repair this
	t.Run("missing-unused-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
	})

	// repo where data exists that is not referenced
	// => check and prune should fully work
	t.Run("unreferenced-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo where an obsolete index still exists
	// => check and prune should fully work
	t.Run("obsolete-index", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo which contains mixed (data/tree) packs
	// => check and prune should fully work
	t.Run("mixed-packs", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo which contains duplicate blobs
	// => checking for unused data should report an error and prune resolves the
	// situation
	opts = CheckOptions{
		ReadData:    true,
		CheckUnused: true,
	}
	t.Run("duplicates", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
	})
}

func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", tarfile)
	rtest.SetupTarTestFixture(t, env.base, datafile)

	if checkOK {
		testRunCheck(t, env.gopts)
	} else {
		rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
			"check should have reported an error")
	}

	if pruneOK {
		testRunPrune(t, env.gopts, optionsPrune)
		testRunCheck(t, env.gopts)
	} else {
		rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
			"prune should have reported an error")
	}
}
@@ -46,7 +46,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 		return err
 	}
 
-	lock, ctx, err := lockRepo(ctx, repo)
+	lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 	defer unlockRepo(lock)
 	if err != nil {
 		return err
@@ -58,7 +58,8 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 	}
 
 	Verbosef("load index files\n")
-	if err = repo.LoadIndex(ctx); err != nil {
+	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	if err = repo.LoadIndex(ctx, bar); err != nil {
 		return err
 	}
 
@@ -73,7 +74,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 	})
 
 	Verbosef("load %d trees\n", len(trees))
-	bar := newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
+	bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
 	for id := range trees {
 		tree, err := restic.LoadTree(ctx, repo, id)
 		if err != nil {
cmd/restic/cmd_repair.go (new file, 14 lines):

package main

import (
	"github.com/spf13/cobra"
)

var cmdRepair = &cobra.Command{
	Use:   "repair",
	Short: "Repair the repository",
}

func init() {
	cmdRoot.AddCommand(cmdRepair)
}
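Note: cmd_repair.go only introduces the bare "repair" parent command; concrete subcommands (for example the "repair index" that the prune warning above now refers to) would attach themselves to it in their own init functions, which are not shown in this excerpt. The following standalone cobra sketch illustrates that wiring; the names are hypothetical, not restic's actual files.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// repairCmd mirrors the parent command added above, but as a standalone example.
var repairCmd = &cobra.Command{
	Use:   "repair",
	Short: "Repair the repository",
}

// repairIndexCmd is a hypothetical child command; real subcommands would
// register themselves against the parent the same way.
var repairIndexCmd = &cobra.Command{
	Use:   "index",
	Short: "Rebuild the repository index",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("would rebuild the index here")
		return nil
	},
}

func init() {
	repairCmd.AddCommand(repairIndexCmd)
}

func main() {
	if err := repairCmd.Execute(); err != nil {
		os.Exit(1)
	}
}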
Some files were not shown because too many files have changed in this diff.