Mirror of https://github.com/restic/restic.git (synced 2025-08-22 01:51:47 +00:00)
Compare commits
272 Commits (newest first):

c02923fbfc 7c5ce83044 37e2e9a844 26e5db1849 a2766ffe0c 0f5e38609f f178cbf93d c8096ca8d2
27d29b9853 8a171731ba abde9e2fba 6a4a328bbc 8253fadc96 134abbd82b cd8226130a 1ebf0e8de8
0fdb9a6129 47b326b7b5 e2cf6eb434 f79698dcdd 35a5307db3 014cec06f1 431ab5aa6a 262b0cd9d4
e83ec17e95 e2005e02bb 41c8c946ba fe08686558 0ed2401711 06bd606d85 c347431907 f63d7048f9
f39f7c76dd 0268d0e7d6 8515d093e0 fe3f326d8d 8170db40c7 99ac0da4bc 7e2c93420f 6d46824fb0
bb435b39d9 2a67d7a6c2 ba43c8bab5 931e6ed2ac a5f0e9ab65 6fc133ad6a e1b80859f2 d069ee31b2
981752ade0 d01d07fc0a 526aaca6f5 2f8147af59 f3016a9096 f854a41ba9 ca3cadef5e 3304b0fcf0
d8938e259a 53a554c89d e71db01230 178e946fc7 f3bff12939 7a99418dc5 c71ba466ea 8ce5d35543
134f834c60 8a37c07295 bd0ada7842 eea96f652d 38c3061df7 f5fa602482 e44ac55f63 f1cfb73a8b
5b96885c6d c5da90a5b7 bcdebfb84e 359b273649 2e2c8dc620 8d37b723ca 315b7f282f a3f8e9dfa7
982810f7cc 90b96d19cd 6a52bb6f54 cacaa4393f d63ab4e9a4 ca6daec8dd c87f2420a6 f5bbbc52f4
9e3dde8ec7 9dba182e51 944fc857eb 7507a658ac 9fa4f5eb6b ce4d71d626 8e2ef3f38b 8dc952775e
99b6163e27 beaf55f1fc 980bb9059f 0e7281eb71 0b6133d7b5 b57ca64275 faadbd734b 88b0a93409
4a995105a9 7fe496f983 e56370eb5b b8af7f63a0 3eea555155 897c923cc9 67193e3deb 0e722efb09
3736f33ebf d1d9c3f9d7 cd5cbe0910 814e992c0b 660fe78735 87d084e18c 9ce2a73fc5 f2314b26ba
74dcf41f25 b6ba30186f 32637a0328 0addd90e14 1b5ee5b10a 042adeb5d0 7e4ce0dacc 8ceb22fe8a
c5553ec855 bb3ed54291 513ba3b6f7 17d688afef d81eee26b3 cc5ada63a4 88fb60e0b5 02200acad0
1a2d190bdb 2db4ff168a a77c8cc5d2 b8866c1fe4 f0f17db847 a5c003acb0 7b44fd0f9d cebee0b8fa
d886bc6c48 d81adcfaa5 6da9bfbbce 69a6e622d0 1dcfd64028 5d1c1f721e fbc8bbf305 cdef55bb88
26df48b2aa a7baea0522 55e6003749 846acd5d4c 43f8145858 79759928f6 eb59d28154 79f63a2e74
6a62254048 657a1d75af 2694def56a 7843341da3 d46314648e e45011af57 fb09884893 d1eecafa63
b47d991f56 553ea812a7 216e374310 034b0b8040 5ab9e12b46 abe6e0d22d f5b550191c 3dcacb3730
033589a66b e46a647c45 f26492fc2d 3473c3f7b6 1b5242b4f9 6d897def1b 2f10e25738 555bd257bd
3afd974dea f4120c9d45 61cb1cc6f8 49bc1d0b3b ba23d24dd1 556a63de19 fae3c4d437 89c2ed2a1c
23f1cb06d6 ac92e2dd2d bf58425351 a3dc0ab398 224ebdb8b9 cf80d295f3 2133869127 97330ac621
1ee1559506 eccc336319 7fe657ec71 77c07bfd19 4de938d97a dad1c87afe 801dbb6d03 fa0be82da8
7e8bc8d362 0bb2a8e0d0 2e72b57f2f bff1039e3a 5a999cb77f 3a2539e0ac e262f35d0a 176bfa6529
240c4cf2fd db5ec5d876 e1dfaf5d87 5436154f0d 809e218d20 1eaad6cebb 56fccecd06 3890a947ca
e299272378 70248bd05a 7a5fde8f5a 62ba9f1950 610b676444 58699e3c90 9be24a1c9f 5ace41471e
3b2106ed30 5f4f997126 49d397a419 ea1ab96749 24c62e719a 9c6b7f688e d41dce5ecb 52a3eafede
55dfc85159 a7a478a19e 2080afd9de 9aa136b982 3a191f37cb 429106340f 530c73b457 fb9729fdb9
45a09c76ff efd65a1b65 ae60188eb9 3b904525d9 1e31f5202f f12d41138a 98369f6a5d 8f9bf1995b
e7de3b5f9d 3541d06d07 db0e3cd772 d3fee08f9a 727fb6eabe d610c60991 3f6e11d26e a4577769ae
7f927d4774 e091673f8f 9842eff887 c40b3d3983 ac5eefdee4 bf508643a5 02fc16e97d 1a83a739dc
28
.github/ISSUE_TEMPLATE.md
vendored
@@ -4,8 +4,9 @@ take a lot longer to find the problem! Please take the time to help us
|
||||
debugging the problem by collecting information, even if it seems irrelevant to
|
||||
you. Thanks!
|
||||
|
||||
If you have a question, maybe the forum at https://discourse.restic.net is a
|
||||
better place.
|
||||
If you have a question, the forum at https://discourse.restic.net is a better place.
|
||||
Please do not create issues for usage or documentation questions! We're using
|
||||
the GitHub issue tracker mainly for tracking bugs and feature requests.
|
||||
-->
|
||||
|
||||
## Output of `restic version`
|
||||
@@ -14,22 +15,39 @@ better place.
|
||||
## How did you run restic exactly?
|
||||
|
||||
<!--
|
||||
Include the complete command line and any environment variables you used to
|
||||
configure restic's backend access. Make sure to replace sensitive values!
|
||||
This section should include at least:
|
||||
|
||||
* The complete command line and any environment variables you used to
|
||||
configure restic's backend access. Make sure to replace sensitive values!
|
||||
|
||||
* The output of the commands; what restic prints may give us much
|
||||
information to diagnose the problem!
|
||||
-->
|
||||
|
||||
|
||||
## What backend/server/service did you use?
|
||||
## What backend/server/service did you use to store the repository?
|
||||
|
||||
|
||||
## Expected behavior
|
||||
|
||||
<!--
|
||||
Describe what you'd like restic to do differently.
|
||||
-->
|
||||
|
||||
## Actual behavior
|
||||
|
||||
<!--
|
||||
In this section, please try to concentrate on observations, so only describe
|
||||
what you observed directly.
|
||||
-->
|
||||
|
||||
## Steps to reproduce the behavior
|
||||
|
||||
<!--
|
||||
The more time you spend describing an easy way to reproduce the behavior (if
|
||||
this is possible), the easier it is for the project developers to fix it!
|
||||
-->
|
||||
|
||||
|
||||
## Do you have any idea what may have caused this?
|
||||
|
||||
|
132
CHANGELOG.md
@@ -1,6 +1,138 @@
|
||||
This file describes changes relevant to all users that are made in each
|
||||
released version of restic from the perspective of the user.
|
||||
|
||||
Important Changes in 0.8.0
|
||||
==========================
|
||||
|
||||
* A vulnerability was found in the restic restorer, which allowed attackers in
|
||||
special circumstances to restore files to a location outside of the target
|
||||
directory. Due to the circumstances we estimate this to be a low-risk
|
||||
vulnerability, but urge all users to upgrade to the latest version of restic.
|
||||
|
||||
Exploiting the vulnerability requires a Linux/Unix system which saves
|
||||
backups via restic and a Windows system which restores files from the repo.
In addition, the attackers need to be able to create files with
|
||||
arbitrary names which are then saved to the restic repo. For example, by
|
||||
creating a file named "..\test.txt" (which is a perfectly legal filename on
|
||||
Linux) and restoring a snapshot containing this file on Windows, it would be
|
||||
written to the parent of the target directory.
|
||||
|
||||
We'd like to thank Tyler Spivey for reporting this responsibly!
|
||||
|
||||
https://github.com/restic/restic/pull/1445
|
||||
|
||||
* The s3 backend used the subdir `restic` within a bucket if no explicit path
|
||||
after the bucket name was specified. Since this version, restic does not use
|
||||
this default path any more. If you created a repo on s3 in a bucket without
|
||||
specifying a path within the bucket, you need to add `/restic` at the end of
|
||||
the repository specification to access your repo: `s3:s3.amazonaws.com/bucket/restic`
|
||||
https://github.com/restic/restic/issues/1292
|
||||
https://github.com/restic/restic/pull/1437
|
||||
|
||||
* We've added a local cache for metadata so that restic doesn't need to load
|
||||
all metadata (snapshots, indexes, ...) from the repo each time it starts. By
|
||||
default the cache is active, but there's a new global option `--no-cache`
|
||||
that can be used to disable the cache. By default, the cache uses a standard
cache folder for the OS, which can be overridden with `--cache-dir`. The
cache is populated automatically: indexes and snapshots are saved as they
are loaded (see the example after this list).
|
||||
https://github.com/restic/restic/pull/1040
|
||||
https://github.com/restic/restic/issues/29
|
||||
https://github.com/restic/restic/issues/738
|
||||
https://github.com/restic/restic/issues/282
|
||||
https://github.com/restic/restic/pull/1287
|
||||
|
||||
* A related change was to by default create pack files in the repo that
|
||||
contain either data or metadata, not both mixed together. This allows easy
|
||||
caching of only the metadata files. The next run of `restic prune` will
|
||||
untangle mixed files automatically.
|
||||
https://github.com/restic/restic/pull/1265
|
||||
|
||||
* The Google Cloud Storage backend no longer requires the service account to
|
||||
have the `storage.buckets.get` permission ("Storage Admin" role) in `restic
|
||||
init` if the bucket already exists.
|
||||
https://github.com/restic/restic/pull/1281
|
||||
|
||||
* Added support for rate limiting through `--limit-upload` and
|
||||
`--limit-download` flags.
|
||||
https://github.com/restic/restic/issues/1216
|
||||
https://github.com/restic/restic/pull/1336
|
||||
https://github.com/restic/restic/pull/1358
|
||||
|
||||
* Failed backend requests are now automatically retried.
|
||||
https://github.com/restic/restic/pull/1353
|
||||
|
||||
* We've added the `dump` command which prints a file from a snapshot to
|
||||
stdout. This can e.g. be used to restore files read with `backup --stdin`.
|
||||
https://github.com/restic/restic/issues/510
|
||||
https://github.com/restic/restic/pull/1346
|
||||
|
||||
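The cache, rate-limiting, and S3 path changes above combine in everyday use. The following is a hedged sketch only, not part of the release notes; the bucket name, cache directory, and backup path are invented, and the flag values are arbitrary:

```console
# Repositories created in a bucket without an explicit path now need /restic appended.
$ export RESTIC_REPOSITORY=s3:s3.amazonaws.com/my-bucket/restic

# The metadata cache is enabled by default; relocate or disable it as needed.
$ restic --cache-dir /var/cache/restic backup /home/user
$ restic --no-cache snapshots

# Throttle backend traffic with the new rate-limiting flags.
$ restic --limit-upload 1024 --limit-download 2048 backup /home/user
```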
Small changes
|
||||
-------------
|
||||
|
||||
* The directory structure in the fuse mount now exposes a symlink `latest`
|
||||
which points to the latest snapshot in that particular directory.
|
||||
https://github.com/restic/restic/pull/1249
|
||||
|
||||
* The option `--compact` was added to the `forget` command to provide the same
|
||||
compact view as the `snapshots` command.
|
||||
https://github.com/restic/restic/pull/1269
|
||||
|
||||
* We've re-enabled a workaround for `minio-go` (the library we're using to
|
||||
access s3 backends); this reduces memory usage.
|
||||
https://github.com/restic/restic/issues/1256
|
||||
https://github.com/restic/restic/pull/1267
|
||||
|
||||
* The sftp backend now prompts for the password if a password is necessary for
|
||||
login.
|
||||
https://github.com/restic/restic/issues/448
|
||||
https://github.com/restic/restic/pull/1270
|
||||
|
||||
* The `generate` command has been added, which replaces the now removed
|
||||
commands `manpage` and `autocomplete`. This release of restic contains the
|
||||
most recent manpages in `doc/man` and the auto-completion files for bash and
|
||||
zsh in `doc/bash-completion.sh` and `doc/zsh-completion.zsh`
|
||||
https://github.com/restic/restic/issues/1274
|
||||
https://github.com/restic/restic/pull/1282
|
||||
|
||||
* A bug was discovered in the library we're using to access Backblaze; it now
reuses already established TCP connections, which should be a lot faster and
|
||||
not cause network failures any more.
|
||||
https://github.com/restic/restic/issues/1291
|
||||
https://github.com/restic/restic/pull/1301
|
||||
|
||||
* Another bug in the `forget` command caused `prune` not to be run when
|
||||
`--prune` was specified without a policy, e.g. when only snapshot IDs that
|
||||
should be forgotten are listed manually. This is corrected now.
|
||||
https://github.com/restic/restic/pull/1317
|
||||
|
||||
* The `check` command now explicitly prints `No errors were found` when no
|
||||
errors could be found.
|
||||
https://github.com/restic/restic/pull/1319
|
||||
https://github.com/restic/restic/issues/1303
|
||||
|
||||
* The fuse mount now has an `ids` subdirectory which contains the snapshots
|
||||
below their (short) IDs.
|
||||
https://github.com/restic/restic/issues/1102
|
||||
https://github.com/restic/restic/pull/1299
|
||||
https://github.com/restic/restic/pull/1320
|
||||
|
||||
* The `backup` command was improved: it now caches the result of excludes for
|
||||
a directory.
|
||||
https://github.com/restic/restic/issues/1271
|
||||
https://github.com/restic/restic/pull/1326
|
||||
|
||||
* We've added the `--cacert` option which can be used to pass one (or more) CA
|
||||
certificates to restic. These are used in addition to the system CA
|
||||
certificates to verify HTTPS certificates (e.g. for the REST backend).
|
||||
https://github.com/restic/restic/issues/1114
|
||||
https://github.com/restic/restic/pull/1276
|
||||
|
||||
* When the list of files/dirs to be saved is read from a file with
|
||||
`--files-from`, comment lines (starting with `#`) are now ignored.
|
||||
https://github.com/restic/restic/issues/1367
|
||||
https://github.com/restic/restic/pull/1368
|
||||
|
||||
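Several of the small changes above are easiest to see side by side. This is an illustrative session rather than documented output; the list file, certificate path, and output locations are made up:

```console
# Comment lines starting with '#' in a --files-from list are now ignored.
$ cat files.txt
# home directories
/home/user/documents
/home/user/photos
$ restic backup --files-from files.txt

# Pass an extra CA certificate, e.g. for a self-hosted REST backend.
$ restic --cacert /etc/ssl/private/rest-server-ca.pem snapshots

# Generate man pages and completion scripts (replaces "manpage" and "autocomplete").
$ restic generate --man doc/man \
    --bash-completion doc/bash-completion.sh \
    --zsh-completion doc/zsh-completion.zsh
```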
Important Changes in 0.7.3
|
||||
==========================
|
||||
|
||||
|
@@ -129,13 +129,7 @@ down to the following steps:
|
||||
next stable release. While writing, ask yourself: If I were the user, what
|
||||
would I need to be aware of with this change?
|
||||
|
||||
8. When your contribution adds and/or changes command-line parameters or help
|
||||
texts, the manual pages need to be regenerated and committed to the
|
||||
repository. In order to do this, compile restic and save the generated
|
||||
updated man pages in the subdir `doc/man` with the following command:
|
||||
`./restic manpage --output-dir doc/man`
|
||||
|
||||
9. Once your code looks good and passes all the tests, we'll merge it. Thanks
|
||||
8. Once your code looks good and passes all the tests, we'll merge it. Thanks
|
||||
a lot for your contribution!
|
||||
|
||||
Please provide the patches for each bug or feature in a separate branch and
|
||||
|
60
Gopkg.lock
generated
@@ -10,20 +10,26 @@
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "5a9e19d4e1e41a734154e44a2132b358afb49a03"
|
||||
version = "v0.13.0"
|
||||
revision = "eaddaf6dd7ee35fd3c2420c8d27478db176b0485"
|
||||
version = "v0.15.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = ["storage"]
|
||||
revision = "df4dd90d076ebbf6e87d08d3f00bfac8ff4bde1a"
|
||||
version = "v10.3.1-beta"
|
||||
revision = "509eea43b93cec2f3f17acbe2578ef58703923f8"
|
||||
version = "v11.1.1-beta"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
|
||||
revision = "5432abe734f8d95c78340cd56712f912906e6514"
|
||||
version = "v8.3.1"
|
||||
revision = "7aa5b8a6f18b5c15910c767ab005fc4585221177"
|
||||
version = "v9.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cenkalti/backoff"
|
||||
packages = ["."]
|
||||
revision = "61153c768f31ee5f130071d08fc82b85208528de"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
@@ -34,14 +40,14 @@
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
|
||||
version = "v3.0.0"
|
||||
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
|
||||
version = "v3.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/dustin/go-humanize"
|
||||
packages = ["."]
|
||||
revision = "79e699ccd02f240a1f1fbbdcee7e64c1c12e41aa"
|
||||
revision = "77ed807830b4df581417e7f89eb81d4872832b72"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/elithrar/simple-scrypt"
|
||||
@@ -52,14 +58,14 @@
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
|
||||
version = "v1.28.2"
|
||||
revision = "5b3e00af70a9484542169a976dcab8d03e601a17"
|
||||
version = "v1.30.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "17ce1425424ab154092bbb43af630bd647f3bb0d"
|
||||
revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
@@ -67,6 +73,12 @@
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/ratelimit"
|
||||
packages = ["."]
|
||||
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/fs"
|
||||
@@ -76,8 +88,8 @@
|
||||
[[projects]]
|
||||
name = "github.com/kurin/blazer"
|
||||
packages = ["b2","base","internal/b2types","internal/blog"]
|
||||
revision = "1a870c3ee8b83e17d762307c6eae8f390ac3f4a0"
|
||||
version = "v0.1.1"
|
||||
revision = "e269a1a17bb6aec278c06a57cb7e8f8d0d333e04"
|
||||
version = "v0.2.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -95,7 +107,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "9d3f812e23d270d1c66a9a01e20af1005061cdc4"
|
||||
revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
@@ -124,8 +136,8 @@
|
||||
[[projects]]
|
||||
name = "github.com/restic/chunker"
|
||||
packages = ["."]
|
||||
revision = "bb2ecf9a98e35a0b336ffc23fc515fb6e7961577"
|
||||
version = "v0.1.0"
|
||||
revision = "db83917be3b88cc307464b7d8a221c173e34a0db"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
@@ -149,7 +161,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [".","doc"]
|
||||
revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3"
|
||||
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
@@ -161,31 +173,31 @@
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
|
||||
revision = "faadfbdc035307d901e69eea569f5dda451a3ee3"
|
||||
revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp"]
|
||||
revision = "b129b8e0fbeb39c8358e51a07ab6c50ad415e72e"
|
||||
revision = "cd69bc3fc700721b709c3a59e16e24c67b58f6ff"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "13449ad91cb26cb47661c1b080790392170385fd"
|
||||
revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "062cd7e4e68206d8bab9b18396626e855c992658"
|
||||
revision = "8dbc5d05d6edcc104950cc299a1ce6641235bc86"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
|
||||
revision = "2fe03ca2dc379c00d654a4459d1a50812cac2848"
|
||||
revision = "7afc123cf726cd2f253faa3e144d2ab65477b18f"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
@@ -202,6 +214,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "53e4779dc4c7de2cd8b195f13c215c24da5efc5e33acf584615b5c43bfefd2db"
|
||||
inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
57
Gopkg.toml
@@ -19,60 +19,3 @@
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "bazil.org/fuse"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/elithrar/simple-scrypt"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/kurin/blazer"
|
||||
version = "0.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/minio/minio-go"
|
||||
version = "3.0.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/errors"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/profile"
|
||||
version = "1.2.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/sftp"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/xattr"
|
||||
version = "0.2.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/restic/chunker"
|
||||
version = "0.1.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
|
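The Gopkg.lock changes above are the output of the dep tool working from these Gopkg.toml constraints. As a rough sketch of that workflow (assuming dep is installed; the project name in the update example is just one of the pinned dependencies):

```console
# Re-resolve dependencies after editing Gopkg.toml and refresh Gopkg.lock.
$ dep ensure

# Update a single pinned project to the newest version allowed by the constraints.
$ dep ensure -update github.com/kurin/blazer
```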
18
LICENSE
@@ -1,19 +1,21 @@
|
||||
BSD 2-Clause License
|
||||
|
||||
Copyright (c) 2014, Alexander Neumann <alexander@bumpern.de>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
|
26
README.rst
@@ -1,4 +1,4 @@
|
||||
|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks|
|
||||
|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage|
|
||||
|
||||
Introduction
|
||||
------------
|
||||
@@ -13,7 +13,7 @@ Quick start
|
||||
-----------
|
||||
|
||||
Once you've `installed
|
||||
<https://restic.readthedocs.io/en/latest/installation.html>`__ restic, start
|
||||
<https://restic.readthedocs.io/en/latest/020_installation.html>`__ restic, start
|
||||
off with creating a repository for your backups:
|
||||
|
||||
.. code-block:: console
|
||||
@@ -41,7 +41,7 @@ Next you can either use ``restic restore`` to restore files or use ``restic
|
||||
mount`` to mount the repository via fuse and browse the files from previous
|
||||
snapshots.
|
||||
|
||||
For more options check out the `manual guide <https://restic.readthedocs.io/en/latest/manual.html>`__.
|
||||
For more options check out the `online documentation <https://restic.readthedocs.io/en/latest/>`__.
|
||||
|
||||
Backends
|
||||
--------
|
||||
@@ -49,14 +49,14 @@ Backends
|
||||
Saving a backup on the same machine is nice but not a real backup strategy.
|
||||
Therefore, restic supports the following backends for storing backups natively:
|
||||
|
||||
- `Local directory <https://restic.readthedocs.io/en/latest/manual.html#local>`__
|
||||
- `sftp server (via SSH) <https://restic.readthedocs.io/en/latest/manual.html#sftp>`__
|
||||
- `HTTP REST server <https://restic.readthedocs.io/en/latest/manual.html#rest-server>`__ (`protocol <doc/rest_backend.rst>`__ `rest-server <https://github.com/restic/rest-server>`__)
|
||||
- `AWS S3 <https://restic.readthedocs.io/en/latest/manual.html#amazon-s3>`__ (either from Amazon or using the `Minio <https://minio.io>`__ server)
|
||||
- `OpenStack Swift <https://restic.readthedocs.io/en/latest/manual.html#openstack-swift>`__
|
||||
- `BackBlaze B2 <https://restic.readthedocs.io/en/latest/manual.html#backblaze-b2>`__
|
||||
- `Microsoft Azure Blob Storage <https://restic.readthedocs.io/en/latest/manual.html#microsoft-azure-blob-storage>`__
|
||||
- `Google Cloud Storage <https://restic.readthedocs.io/en/latest/manual.html#google-cloud-storage>`__
|
||||
- `Local directory <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#local>`__
|
||||
- `sftp server (via SSH) <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#sftp>`__
|
||||
- `HTTP REST server <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server>`__ (`protocol <doc/rest_backend.rst>`__ `rest-server <https://github.com/restic/rest-server>`__)
|
||||
- `AWS S3 <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3>`__ (either from Amazon or using the `Minio <https://minio.io>`__ server)
|
||||
- `OpenStack Swift <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#openstack-swift>`__
|
||||
- `BackBlaze B2 <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#backblaze-b2>`__
|
||||
- `Microsoft Azure Blob Storage <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#microsoft-azure-blob-storage>`__
|
||||
- `Google Cloud Storage <https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#google-cloud-storage>`__
|
||||
|
||||
Design Principles
|
||||
-----------------
|
||||
@@ -107,7 +107,7 @@ the `development blog <https://restic.github.io/blog/>`__.
|
||||
License
|
||||
-------
|
||||
|
||||
Restic is licensed under "BSD 2-Clause License". You can find the
|
||||
Restic is licensed under `BSD 2-Clause License <https://opensource.org/licenses/BSD-2-Clause>`__. You can find the
|
||||
complete text in ``LICENSE``.
|
||||
|
||||
.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest
|
||||
@@ -120,3 +120,5 @@ complete text in ``LICENSE``.
|
||||
:target: https://goreportcard.com/report/github.com/restic/restic
|
||||
.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg
|
||||
:target: https://saythanks.io/to/restic
|
||||
.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg
|
||||
:target: https://codecov.io/gh/restic/restic
|
||||
|
@@ -14,15 +14,27 @@ var cleanupHandlers struct {
|
||||
sync.Mutex
|
||||
list []func() error
|
||||
done bool
|
||||
ch chan os.Signal
|
||||
}
|
||||
|
||||
var stderr = os.Stderr
|
||||
|
||||
func init() {
|
||||
c := make(chan os.Signal)
|
||||
signal.Notify(c, syscall.SIGINT)
|
||||
cleanupHandlers.ch = make(chan os.Signal)
|
||||
go CleanupHandler(cleanupHandlers.ch)
|
||||
InstallSignalHandler()
|
||||
}
|
||||
|
||||
go CleanupHandler(c)
|
||||
// InstallSignalHandler listens for SIGINT and SIGPIPE, and triggers the cleanup handlers.
|
||||
func InstallSignalHandler() {
|
||||
signal.Notify(cleanupHandlers.ch, syscall.SIGINT)
|
||||
signal.Notify(cleanupHandlers.ch, syscall.SIGPIPE)
|
||||
}
|
||||
|
||||
// SuspendSignalHandler removes the signal handler for SIGINT and SIGPIPE.
|
||||
func SuspendSignalHandler() {
|
||||
signal.Reset(syscall.SIGINT)
|
||||
signal.Reset(syscall.SIGPIPE)
|
||||
}
|
||||
|
||||
// AddCleanupHandler adds the function f to the list of cleanup handlers so
|
||||
@@ -57,12 +69,18 @@ func RunCleanupHandlers() {
|
||||
cleanupHandlers.list = nil
|
||||
}
|
||||
|
||||
// CleanupHandler handles the SIGINT signal.
|
||||
// CleanupHandler handles the SIGINT and SIGPIPE signals.
|
||||
func CleanupHandler(c <-chan os.Signal) {
|
||||
for s := range c {
|
||||
debug.Log("signal %v received, cleaning up", s)
|
||||
fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
|
||||
Exit(0)
|
||||
fmt.Fprintf(stderr, "%ssignal %v received, cleaning up\n", ClearLine(), s)
|
||||
|
||||
code := 0
|
||||
if s != syscall.SIGINT {
|
||||
code = 1
|
||||
}
|
||||
|
||||
Exit(code)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,37 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var cmdAutocomplete = &cobra.Command{
|
||||
Use: "autocomplete",
|
||||
Short: "Generate shell autocompletion script",
|
||||
Long: `The "autocomplete" command generates a shell autocompletion script.
|
||||
|
||||
NOTE: The current version supports Bash only.
|
||||
This should work for *nix systems with Bash installed.
|
||||
|
||||
By default, the file is written directly to /etc/bash_completion.d
|
||||
for convenience, and the command may need superuser rights, e.g.:
|
||||
|
||||
$ sudo restic autocomplete`,
|
||||
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := cmdRoot.GenBashCompletionFile(autocompleteTarget); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var autocompleteTarget string
|
||||
|
||||
func init() {
|
||||
cmdRoot.AddCommand(cmdAutocomplete)
|
||||
|
||||
cmdAutocomplete.Flags().StringVarP(&autocompleteTarget, "completionfile", "", "/usr/share/bash-completion/completions/restic", "autocompletion file")
|
||||
// For bash-completion
|
||||
cmdAutocomplete.Flags().SetAnnotation("completionfile", cobra.BashCompFilenameExt, []string{})
|
||||
}
|
@@ -83,7 +83,7 @@ func init() {
|
||||
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
|
||||
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
|
||||
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
|
||||
f.StringVar(&backupOptions.Hostname, "hostname", "", "set the `hostname` for the snapshot manually")
|
||||
f.StringVar(&backupOptions.Hostname, "hostname", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
|
||||
f.StringVar(&backupOptions.FilesFrom, "files-from", "", "read the files to backup from file (can be combined with file args)")
|
||||
f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
|
||||
}
|
||||
@@ -298,9 +298,14 @@ func readLinesFromFile(filename string) ([]string, error) {
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
// ignore empty lines
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
// strip comments
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
lines = append(lines, line)
|
||||
}
|
||||
|
||||
@@ -367,8 +372,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
|
||||
}
|
||||
|
||||
rc := &rejectionCache{}
|
||||
for _, spec := range opts.ExcludeIfPresent {
|
||||
f, err := rejectIfPresent(spec)
|
||||
f, err := rejectIfPresent(spec, rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -387,6 +393,16 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// exclude restic cache
|
||||
if repo.Cache != nil {
|
||||
f, err := rejectResticCache(repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rejectFuncs = append(rejectFuncs, f)
|
||||
}
|
||||
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -406,7 +422,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
// Find last snapshot to set it as parent, if not already set
|
||||
if !opts.Force && parentSnapshotID == nil {
|
||||
id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, []restic.TagList{opts.Tags}, opts.Hostname)
|
||||
id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, []restic.TagList{}, opts.Hostname)
|
||||
if err == nil {
|
||||
parentSnapshotID = &id
|
||||
} else if err != restic.ErrNoSnapshotFound {
|
||||
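The `--stdin` and `--stdin-filename` flags shown in this hunk pair naturally with the new `dump` command from the changelog. A hedged round-trip example; the database name and repository settings are assumed, not taken from the diff:

```console
# Save a stream under a chosen file name inside the snapshot.
$ mysqldump mydb | restic backup --stdin --stdin-filename mydb.sql

# Later, stream that file from the latest snapshot back to stdout.
$ restic dump latest mydb.sql > restored-mydb.sql
```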
|
@@ -19,6 +19,9 @@ var cmdCheck = &cobra.Command{
|
||||
Long: `
|
||||
The "check" command tests the repository for errors and reports any errors it
|
||||
finds. It can also be used to read all data and therefore simulate a restore.
|
||||
|
||||
By default, the "check" command will always load all data directly from the
|
||||
repository and not use a local cache.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -30,6 +33,7 @@ finds. It can also be used to read all data and therefore simulate a restore.
|
||||
type CheckOptions struct {
|
||||
ReadData bool
|
||||
CheckUnused bool
|
||||
WithCache bool
|
||||
}
|
||||
|
||||
var checkOptions CheckOptions
|
||||
@@ -40,6 +44,7 @@ func init() {
|
||||
f := cmdCheck.Flags()
|
||||
f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs")
|
||||
f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "find unused blobs")
|
||||
f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
|
||||
}
|
||||
|
||||
func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
|
||||
@@ -77,13 +82,18 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("check has no arguments")
|
||||
}
|
||||
|
||||
if !opts.WithCache {
|
||||
// do not use a cache for the checker
|
||||
gopts.NoCache = true
|
||||
}
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !gopts.NoLock {
|
||||
Verbosef("Create exclusive lock for repository\n")
|
||||
Verbosef("create exclusive lock for repository\n")
|
||||
lock, err := lockRepoExclusive(repo)
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
@@ -93,7 +103,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
|
||||
chkr := checker.New(repo)
|
||||
|
||||
Verbosef("Load indexes\n")
|
||||
Verbosef("load indexes\n")
|
||||
hints, errs := chkr.LoadIndex(context.TODO())
|
||||
|
||||
dupFound := false
|
||||
@@ -118,7 +128,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
errorsFound := false
|
||||
errChan := make(chan error)
|
||||
|
||||
Verbosef("Check all packs\n")
|
||||
Verbosef("check all packs\n")
|
||||
go chkr.Packs(context.TODO(), errChan)
|
||||
|
||||
for err := range errChan {
|
||||
@@ -126,7 +136,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
}
|
||||
|
||||
Verbosef("Check snapshots, trees and blobs\n")
|
||||
Verbosef("check snapshots, trees and blobs\n")
|
||||
errChan = make(chan error)
|
||||
go chkr.Structure(context.TODO(), errChan)
|
||||
|
||||
@@ -150,7 +160,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
|
||||
if opts.ReadData {
|
||||
Verbosef("Read all data\n")
|
||||
Verbosef("read all data\n")
|
||||
|
||||
p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
|
||||
errChan := make(chan error)
|
||||
@@ -166,5 +176,8 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
if errorsFound {
|
||||
return errors.Fatal("repository contains errors")
|
||||
}
|
||||
|
||||
Verbosef("no errors were found\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
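To put the new `--with-cache` option in context, here is a minimal sketch of invoking the reworked `check` command (repository location and credentials omitted):

```console
# check now bypasses the local cache by default; opt back in explicitly.
$ restic check --with-cache

# Additionally download and verify all data blobs (slow but thorough).
$ restic check --read-data
```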
217
cmd/restic/cmd_debug.go
Normal file
@@ -0,0 +1,217 @@
|
||||
// +build debug
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/pack"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/restic/restic/internal/worker"
|
||||
)
|
||||
|
||||
var cmdDebug = &cobra.Command{
|
||||
Use: "debug",
|
||||
Short: "Debug commands",
|
||||
}
|
||||
|
||||
var cmdDebugDump = &cobra.Command{
|
||||
Use: "dump [indexes|snapshots|all|packs]",
|
||||
Short: "Dump data structures",
|
||||
Long: `
|
||||
The "dump" command dumps data structures from the repository as JSON objects. It
|
||||
is used for debugging purposes only.`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runDebugDump(globalOptions, args)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdRoot.AddCommand(cmdDebug)
|
||||
cmdDebug.AddCommand(cmdDebugDump)
|
||||
}
|
||||
|
||||
func prettyPrintJSON(wr io.Writer, item interface{}) error {
|
||||
buf, err := json.MarshalIndent(item, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = wr.Write(append(buf, '\n'))
|
||||
return err
|
||||
}
|
||||
|
||||
func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
|
||||
for id := range repo.List(context.TODO(), restic.SnapshotFile) {
|
||||
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(wr, "snapshot_id: %v\n", id)
|
||||
|
||||
err = prettyPrintJSON(wr, snapshot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const dumpPackWorkers = 10
|
||||
|
||||
// Pack is the struct used in printPacks.
|
||||
type Pack struct {
|
||||
Name string `json:"name"`
|
||||
|
||||
Blobs []Blob `json:"blobs"`
|
||||
}
|
||||
|
||||
// Blob is the struct used in printPacks.
|
||||
type Blob struct {
|
||||
Type restic.BlobType `json:"type"`
|
||||
Length uint `json:"length"`
|
||||
ID restic.ID `json:"id"`
|
||||
Offset uint `json:"offset"`
|
||||
}
|
||||
|
||||
func printPacks(repo *repository.Repository, wr io.Writer) error {
|
||||
f := func(ctx context.Context, job worker.Job) (interface{}, error) {
|
||||
name := job.Data.(string)
|
||||
|
||||
h := restic.Handle{Type: restic.DataFile, Name: name}
|
||||
|
||||
blobInfo, err := repo.Backend().Stat(ctx, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
}
|
||||
|
||||
jobCh := make(chan worker.Job)
|
||||
resCh := make(chan worker.Job)
|
||||
wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
|
||||
|
||||
go func() {
|
||||
for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
|
||||
jobCh <- worker.Job{Data: name}
|
||||
}
|
||||
close(jobCh)
|
||||
}()
|
||||
|
||||
for job := range resCh {
|
||||
name := job.Data.(string)
|
||||
|
||||
if job.Error != nil {
|
||||
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
|
||||
continue
|
||||
}
|
||||
|
||||
entries := job.Result.([]restic.Blob)
|
||||
p := Pack{
|
||||
Name: name,
|
||||
Blobs: make([]Blob, len(entries)),
|
||||
}
|
||||
for i, blob := range entries {
|
||||
p.Blobs[i] = Blob{
|
||||
Type: blob.Type,
|
||||
Length: blob.Length,
|
||||
ID: blob.ID,
|
||||
Offset: blob.Offset,
|
||||
}
|
||||
}
|
||||
|
||||
prettyPrintJSON(os.Stdout, p)
|
||||
}
|
||||
|
||||
wp.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dumpIndexes(repo restic.Repository) error {
|
||||
for id := range repo.List(context.TODO(), restic.IndexFile) {
|
||||
fmt.Printf("index_id: %v\n", id)
|
||||
|
||||
idx, err := repository.LoadIndex(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.Dump(os.Stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDebugDump(gopts GlobalOptions, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return errors.Fatal("type not specified")
|
||||
}
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !gopts.NoLock {
|
||||
lock, err := lockRepo(repo)
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tpe := args[0]
|
||||
|
||||
switch tpe {
|
||||
case "indexes":
|
||||
return dumpIndexes(repo)
|
||||
case "snapshots":
|
||||
return debugPrintSnapshots(repo, os.Stdout)
|
||||
case "packs":
|
||||
return printPacks(repo, os.Stdout)
|
||||
case "all":
|
||||
fmt.Printf("snapshots:\n")
|
||||
err := debugPrintSnapshots(repo, os.Stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("\nindexes:\n")
|
||||
err = dumpIndexes(repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
return errors.Fatalf("no such type %q", tpe)
|
||||
}
|
||||
}
|
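Because cmd_debug.go carries the `debug` build tag, the command only exists in builds made with that tag. A hedged sketch, assuming a checkout of the repository and a working Go toolchain; the output binary name is arbitrary:

```console
# Build restic with the debug tag so the "debug" command is compiled in.
$ go build -tags debug -o restic-debug ./cmd/restic

# Dump repository data structures as JSON.
$ ./restic-debug debug dump indexes
$ ./restic-debug debug dump packs
```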
@@ -1,168 +1,134 @@
|
||||
// xbuild debug
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/pack"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/restic/restic/internal/worker"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var cmdDump = &cobra.Command{
|
||||
Use: "dump [indexes|snapshots|trees|all|packs]",
|
||||
Short: "Dump data structures",
|
||||
Use: "dump [flags] snapshotID file",
|
||||
Short: "Print a backed-up file to stdout",
|
||||
Long: `
|
||||
The "dump" command dumps data structures from the repository as JSON objects. It
|
||||
is used for debugging purposes only.`,
|
||||
The "dump" command extracts a single file from a snapshot from the repository and
|
||||
prints its contents to stdout.
|
||||
|
||||
The special snapshot "latest" can be used to use the latest snapshot in the
|
||||
repository.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runDump(globalOptions, args)
|
||||
return runDump(dumpOptions, globalOptions, args)
|
||||
},
|
||||
}
|
||||
|
||||
// DumpOptions collects all options for the dump command.
|
||||
type DumpOptions struct {
|
||||
Host string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
}
|
||||
|
||||
var dumpOptions DumpOptions
|
||||
|
||||
func init() {
|
||||
cmdRoot.AddCommand(cmdDump)
|
||||
|
||||
flags := cmdDump.Flags()
|
||||
flags.StringVarP(&dumpOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
|
||||
flags.Var(&dumpOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"")
|
||||
flags.StringArrayVar(&dumpOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
|
||||
}
|
||||
|
||||
func prettyPrintJSON(wr io.Writer, item interface{}) error {
|
||||
buf, err := json.MarshalIndent(item, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
func splitPath(path string) []string {
|
||||
d, f := filepath.Split(path)
|
||||
if d == "" || d == "/" {
|
||||
return []string{f}
|
||||
}
|
||||
|
||||
_, err = wr.Write(append(buf, '\n'))
|
||||
return err
|
||||
s := splitPath(filepath.Clean(d))
|
||||
return append(s, f)
|
||||
}
|
||||
|
||||
func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
|
||||
for id := range repo.List(context.TODO(), restic.SnapshotFile) {
|
||||
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(wr, "snapshot_id: %v\n", id)
|
||||
|
||||
err = prettyPrintJSON(wr, snapshot)
|
||||
func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error {
|
||||
var buf []byte
|
||||
for _, id := range node.Content {
|
||||
size, err := repo.LookupBlobSize(id, restic.DataBlob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) < restic.CiphertextLength(int(size)) {
|
||||
buf = restic.NewBlobBuffer(int(size))
|
||||
}
|
||||
|
||||
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
_, err = os.Stdout.Write(buf)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Write")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const dumpPackWorkers = 10
|
||||
|
||||
// Pack is the struct used in printPacks.
|
||||
type Pack struct {
|
||||
Name string `json:"name"`
|
||||
|
||||
Blobs []Blob `json:"blobs"`
|
||||
}
|
||||
|
||||
// Blob is the struct used in printPacks.
|
||||
type Blob struct {
|
||||
Type restic.BlobType `json:"type"`
|
||||
Length uint `json:"length"`
|
||||
ID restic.ID `json:"id"`
|
||||
Offset uint `json:"offset"`
|
||||
}
|
||||
|
||||
func printPacks(repo *repository.Repository, wr io.Writer) error {
|
||||
f := func(ctx context.Context, job worker.Job) (interface{}, error) {
|
||||
name := job.Data.(string)
|
||||
|
||||
h := restic.Handle{Type: restic.DataFile, Name: name}
|
||||
|
||||
blobInfo, err := repo.Backend().Stat(ctx, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error {
|
||||
if tree == nil {
|
||||
return fmt.Errorf("called with a nil tree")
|
||||
}
|
||||
|
||||
jobCh := make(chan worker.Job)
|
||||
resCh := make(chan worker.Job)
|
||||
wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
|
||||
|
||||
go func() {
|
||||
for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
|
||||
jobCh <- worker.Job{Data: name}
|
||||
}
|
||||
close(jobCh)
|
||||
}()
|
||||
|
||||
for job := range resCh {
|
||||
name := job.Data.(string)
|
||||
|
||||
if job.Error != nil {
|
||||
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
|
||||
continue
|
||||
}
|
||||
|
||||
entries := job.Result.([]restic.Blob)
|
||||
p := Pack{
|
||||
Name: name,
|
||||
Blobs: make([]Blob, len(entries)),
|
||||
}
|
||||
for i, blob := range entries {
|
||||
p.Blobs[i] = Blob{
|
||||
Type: blob.Type,
|
||||
Length: blob.Length,
|
||||
ID: blob.ID,
|
||||
Offset: blob.Offset,
|
||||
if repo == nil {
|
||||
return fmt.Errorf("called with a nil repository")
|
||||
}
|
||||
l := len(pathComponents)
|
||||
if l == 0 {
|
||||
return fmt.Errorf("empty path components")
|
||||
}
|
||||
item := filepath.Join(prefix, pathComponents[0])
|
||||
for _, node := range tree.Nodes {
|
||||
if node.Name == pathComponents[0] {
|
||||
switch {
|
||||
case l == 1 && node.Type == "file":
|
||||
return dumpNode(ctx, repo, node)
|
||||
case l > 1 && node.Type == "dir":
|
||||
subtree, err := repo.LoadTree(ctx, *node.Subtree)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot load subtree for %q", item)
|
||||
}
|
||||
return printFromTree(ctx, subtree, repo, item, pathComponents[1:])
|
||||
case l > 1:
|
||||
return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type)
|
||||
case node.Type != "file":
|
||||
return fmt.Errorf("%q should be a file, but is a %q", item, node.Type)
|
||||
}
|
||||
}
|
||||
|
||||
prettyPrintJSON(os.Stdout, p)
|
||||
}
|
||||
|
||||
wp.Wait()
|
||||
|
||||
return nil
|
||||
return fmt.Errorf("path %q not found in snapshot", item)
|
||||
}
|
||||
|
||||
func dumpIndexes(repo restic.Repository) error {
|
||||
for id := range repo.List(context.TODO(), restic.IndexFile) {
|
||||
fmt.Printf("index_id: %v\n", id)
|
||||
func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
|
||||
ctx := gopts.ctx
|
||||
|
||||
idx, err := repository.LoadIndex(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.Dump(os.Stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(args) != 2 {
|
||||
return errors.Fatal("no file and no snapshot ID specified")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
snapshotIDString := args[0]
|
||||
pathToPrint := args[1]
|
||||
|
||||
func runDump(gopts GlobalOptions, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return errors.Fatal("type not specified")
|
||||
}
|
||||
debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)
|
||||
|
||||
splittedPath := splitPath(pathToPrint)
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
if err != nil {
|
||||
@@ -177,35 +143,39 @@ func runDump(gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
err = repo.LoadIndex(context.TODO())
|
||||
err = repo.LoadIndex(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tpe := args[0]
|
||||
var id restic.ID
|
||||
|
||||
switch tpe {
|
||||
case "indexes":
|
||||
return dumpIndexes(repo)
|
||||
case "snapshots":
|
||||
return debugPrintSnapshots(repo, os.Stdout)
|
||||
case "packs":
|
||||
return printPacks(repo, os.Stdout)
|
||||
case "all":
|
||||
fmt.Printf("snapshots:\n")
|
||||
err := debugPrintSnapshots(repo, os.Stdout)
|
||||
if snapshotIDString == "latest" {
|
||||
id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
|
||||
}
|
||||
|
||||
fmt.Printf("\nindexes:\n")
|
||||
err = dumpIndexes(repo)
|
||||
} else {
|
||||
id, err = restic.FindSnapshot(repo, snapshotIDString)
|
||||
if err != nil {
|
||||
return err
|
||||
Exitf(1, "invalid id %q: %v", snapshotIDString, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
return errors.Fatalf("no such type %q", tpe)
|
||||
}
|
||||
|
||||
sn, err := restic.LoadSnapshot(context.TODO(), repo, id)
|
||||
if err != nil {
|
||||
Exitf(2, "loading snapshot %q failed: %v", snapshotIDString, err)
|
||||
}
|
||||
|
||||
tree, err := repo.LoadTree(ctx, *sn.Tree)
|
||||
if err != nil {
|
||||
Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err)
|
||||
}
|
||||
|
||||
err = printFromTree(ctx, tree, repo, "", splittedPath)
|
||||
if err != nil {
|
||||
Exitf(2, "cannot dump file: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
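The rewritten `dump` command resolves the special snapshot ID `latest` using the host, path, and tag filters defined above. An illustrative invocation; the host name, paths, and short snapshot ID are invented:

```console
# Print a single file from the most recent matching snapshot to stdout.
$ restic dump --host web01 --path /srv/www latest /srv/www/config.ini > config.ini

# Or address a snapshot directly by its (short) ID.
$ restic dump 79d3ab42 /srv/www/config.ini > config.ini
```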
@@ -35,9 +35,10 @@ type ForgetOptions struct {
Yearly int
KeepTags restic.TagLists

Host string
Tags restic.TagLists
Paths []string
Host string
Tags restic.TagLists
Paths []string
Compact bool

// Grouping
GroupBy string
@@ -65,6 +66,7 @@ func init() {
f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname` (deprecated)")
f.Var(&forgetOptions.Tags, "tag", "only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)")
f.StringArrayVar(&forgetOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` (can be specified multiple times)")
f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact format")

f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "string for grouping snapshots by host,paths,tags")
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
@@ -114,6 +116,8 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
}
}

removeSnapshots := 0

ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
@@ -125,11 +129,12 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return err
}
Verbosef("removed snapshot %v\n", sn.ID().Str())
removeSnapshots++
} else {
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
}
} else {
// Determing grouping-keys
// Determining grouping-keys
var tags []string
var hostname string
var paths []string
@@ -176,7 +181,6 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return nil
}

removeSnapshots := 0
for k, snapshotGroup := range snapshotGroups {
var key key
if json.Unmarshal([]byte(k), &key) != nil {
@@ -204,13 +208,13 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {

if len(keep) != 0 && !gopts.Quiet {
Printf("keep %d snapshots:\n", len(keep))
PrintSnapshots(globalOptions.stdout, keep, false)
PrintSnapshots(globalOptions.stdout, keep, opts.Compact)
Printf("\n")
}

if len(remove) != 0 && !gopts.Quiet {
Printf("remove %d snapshots:\n", len(remove))
PrintSnapshots(globalOptions.stdout, remove, false)
PrintSnapshots(globalOptions.stdout, remove, opts.Compact)
Printf("\n")
}

cmd/restic/cmd_generate.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package main

import (
    "time"

    "github.com/restic/restic/internal/errors"
    "github.com/spf13/cobra"
    "github.com/spf13/cobra/doc"
)

var cmdGenerate = &cobra.Command{
    Use:   "generate [command]",
    Short: "Generate manual pages and auto-completion files (bash, zsh)",
    Long: `
The "generate" command writes automatically generated files like the man pages
and the auto-completion files for bash and zsh).
`,
    DisableAutoGenTag: true,
    RunE:              runGenerate,
}

type generateOptions struct {
    ManDir             string
    BashCompletionFile string
    ZSHCompletionFile  string
}

var genOpts generateOptions

func init() {
    cmdRoot.AddCommand(cmdGenerate)
    fs := cmdGenerate.Flags()
    fs.StringVar(&genOpts.ManDir, "man", "", "write man pages to `directory`")
    fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file`")
    fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file`")
}

func writeManpages(dir string) error {
    // use a fixed date for the man pages so that generating them is deterministic
    date, err := time.Parse("Jan 2006", "Jan 2017")
    if err != nil {
        return err
    }

    header := &doc.GenManHeader{
        Title:   "restic backup",
        Section: "1",
        Source:  "generated by `restic generate`",
        Date:    &date,
    }

    Verbosef("writing man pages to directory %v\n", dir)
    return doc.GenManTree(cmdRoot, header, dir)
}

func writeBashCompletion(file string) error {
    Verbosef("writing bash completion file to %v\n", file)
    return cmdRoot.GenBashCompletionFile(file)
}

func writeZSHCompletion(file string) error {
    Verbosef("writing zsh completion file to %v\n", file)
    return cmdRoot.GenZshCompletionFile(file)
}

func runGenerate(cmd *cobra.Command, args []string) error {
    if genOpts.ManDir != "" {
        err := writeManpages(genOpts.ManDir)
        if err != nil {
            return err
        }
    }

    if genOpts.BashCompletionFile != "" {
        err := writeBashCompletion(genOpts.BashCompletionFile)
        if err != nil {
            return err
        }
    }

    if genOpts.ZSHCompletionFile != "" {
        err := writeZSHCompletion(genOpts.ZSHCompletionFile)
        if err != nil {
            return err
        }
    }

    var empty generateOptions
    if genOpts == empty {
        return errors.Fatal("nothing to do, please specify at least one output file/dir")
    }

    return nil
}

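The new generate command leans entirely on spf13/cobra's doc and completion helpers. As a rough standalone sketch of the same API (a hypothetical toy command, not restic's cmdRoot, written only to illustrate the calls used above):

    package main

    import (
        "log"
        "os"
        "time"

        "github.com/spf13/cobra"
        "github.com/spf13/cobra/doc"
    )

    func main() {
        // hypothetical root command standing in for restic's cmdRoot
        root := &cobra.Command{Use: "example", Short: "demo for man page and completion generation"}

        // a fixed date keeps the generated man pages reproducible, as in the diff above
        date, err := time.Parse("Jan 2006", "Jan 2017")
        if err != nil {
            log.Fatal(err)
        }

        if err := os.MkdirAll("./man", 0755); err != nil {
            log.Fatal(err)
        }

        header := &doc.GenManHeader{Title: "example", Section: "1", Date: &date}
        if err := doc.GenManTree(root, header, "./man"); err != nil {
            log.Fatal(err)
        }
        if err := root.GenBashCompletionFile("./example.bash-completion.sh"); err != nil {
            log.Fatal(err)
        }
    }

For the command added in this diff, an invocation would be along the lines of "restic generate --man ./doc/man --bash-completion restic.bash-completion.sh" (flag names taken from the init function above; the paths are illustrative).
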
@@ -1,70 +0,0 @@
package main

import (
    "os"
    "time"

    "github.com/restic/restic/internal/errors"
    "github.com/spf13/cobra"
    "github.com/spf13/cobra/doc"
)

var cmdManpage = &cobra.Command{
    Use:   "manpage [command]",
    Short: "Generate manual pages",
    Long: `
The "manpage" command generates a manual page for a single command. It can also
be used to write all manual pages to a directory. If the output directory is
set and no command is specified, all manpages are written to the directory.
`,
    DisableAutoGenTag: true,
    RunE:              runManpage,
}

var manpageOpts = struct {
    OutputDir string
}{}

func init() {
    cmdRoot.AddCommand(cmdManpage)
    fs := cmdManpage.Flags()
    fs.StringVar(&manpageOpts.OutputDir, "output-dir", "", "write man pages to this `directory`")
}

func runManpage(cmd *cobra.Command, args []string) error {
    // use a fixed date for the man pages so that generating them is deterministic
    date, err := time.Parse("Jan 2006", "Jan 2017")
    if err != nil {
        return err
    }

    header := &doc.GenManHeader{
        Title:   "restic backup",
        Section: "1",
        Source:  "generated by `restic manpage`",
        Date:    &date,
    }

    dir := manpageOpts.OutputDir
    if dir != "" {
        Verbosef("writing man pages to directory %v\n", dir)
        return doc.GenManTree(cmdRoot, header, dir)
    }

    switch {
    case len(args) == 0:
        return errors.Fatalf("no command given")
    case len(args) > 1:
        return errors.Fatalf("more than one command given: %v", args)
    }

    name := args[0]

    for _, cmd := range cmdRoot.Commands() {
        if cmd.Name() == name {
            return doc.GenMan(cmd, header, os.Stdout)
        }
    }

    return errors.Fatalf("command %q is not known", args)
}

@@ -67,6 +67,12 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
return err
}

lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}

err = repo.LoadIndex(context.TODO())
if err != nil {
return err

@@ -85,6 +85,25 @@ func runPrune(gopts GlobalOptions) error {
return pruneRepository(gopts, repo)
}

func mixedBlobs(list []restic.Blob) bool {
var tree, data bool

for _, pb := range list {
switch pb.Type {
case restic.TreeBlob:
tree = true
case restic.DataBlob:
data = true
}

if tree && data {
return true
}
}

return false
}

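As an illustration of what the new mixedBlobs helper reports, a hypothetical test in the same package (not part of this diff, assuming the usual testing and internal/restic imports) could look like this:

    func TestMixedBlobsSketch(t *testing.T) {
        // a pack holding both a tree and a data blob is "mixed" and will be scheduled for rewrite
        mixed := []restic.Blob{{Type: restic.TreeBlob}, {Type: restic.DataBlob}}
        if !mixedBlobs(mixed) {
            t.Error("expected a pack with tree and data blobs to be reported as mixed")
        }

        // a pack holding only data blobs is left alone by this check
        if mixedBlobs([]restic.Blob{{Type: restic.DataBlob}}) {
            t.Error("expected a data-only pack not to be reported as mixed")
        }
    }
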
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
ctx := gopts.ctx

@@ -122,7 +141,7 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
stats.bytes += pack.Size
blobs += len(pack.Entries)
}
Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
Verbosef("repository contains %v packs (%v blobs) with %v\n",
len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))

blobCount := make(map[restic.BlobHandle]int)
@@ -191,6 +210,11 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
// find packs that need a rewrite
rewritePacks := restic.NewIDSet()
for _, pack := range idx.Packs {
if mixedBlobs(pack.Entries) {
rewritePacks.Insert(pack.ID)
continue
}

for _, blob := range pack.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if !usedBlobs.Has(h) {

@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"sort"
"strings"

"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
@@ -29,6 +30,7 @@ type SnapshotOptions struct {
Tags restic.TagLists
Paths []string
Compact bool
Last bool
}

var snapshotOptions SnapshotOptions
@@ -41,6 +43,7 @@ func init() {
f.Var(&snapshotOptions.Tags, "tag", "only consider snapshots which include this `taglist` (can be specified multiple times)")
f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
}

func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
@@ -64,6 +67,11 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
list = append(list, sn)
}

if opts.Last {
list = FilterLastSnapshots(list)
}

sort.Sort(sort.Reverse(list))

if gopts.JSON {
@@ -78,9 +86,50 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
return nil
}

// filterLastSnapshotsKey is used by FilterLastSnapshots.
type filterLastSnapshotsKey struct {
Hostname string
JoinedPaths string
}

// newFilterLastSnapshotsKey initializes a filterLastSnapshotsKey from a Snapshot
func newFilterLastSnapshotsKey(sn *restic.Snapshot) filterLastSnapshotsKey {
// Shallow slice copy
var paths = make([]string, len(sn.Paths))
copy(paths, sn.Paths)
sort.Strings(paths)
return filterLastSnapshotsKey{sn.Hostname, strings.Join(paths, "|")}
}

// FilterLastSnapshots filters a list of snapshots to only return the last
// entry for each hostname and path. If the snapshot contains multiple paths,
// they will be joined and treated as one item.
func FilterLastSnapshots(list restic.Snapshots) restic.Snapshots {
// Sort the snapshots so that the newer ones are listed first
sort.SliceStable(list, func(i, j int) bool {
return list[i].Time.After(list[j].Time)
})

var results restic.Snapshots
seen := make(map[filterLastSnapshotsKey]bool)
for _, sn := range list {
key := newFilterLastSnapshotsKey(sn)
if !seen[key] {
seen[key] = true
results = append(results, sn)
}
}
return results
}

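A small, hypothetical usage sketch of FilterLastSnapshots (assuming it runs inside this package with the time and internal/restic imports; the hostnames, paths and dates are made up):

    list := restic.Snapshots{
        &restic.Snapshot{Hostname: "srv1", Paths: []string{"/home"}, Time: time.Date(2017, 10, 1, 0, 0, 0, 0, time.UTC)},
        &restic.Snapshot{Hostname: "srv1", Paths: []string{"/home"}, Time: time.Date(2017, 10, 2, 0, 0, 0, 0, time.UTC)},
        &restic.Snapshot{Hostname: "srv2", Paths: []string{"/home"}, Time: time.Date(2017, 10, 1, 0, 0, 0, 0, time.UTC)},
    }
    last := FilterLastSnapshots(list)
    // last contains two snapshots: the newer srv1 snapshot and the single srv2 snapshot

This is the helper behind the new --last flag registered in init above.
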
// PrintSnapshots prints a text table of the snapshots in list to stdout.
func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {

// always sort the snapshots so that the newer ones are listed last
sort.SliceStable(list, func(i, j int) bool {
return list[i].Time.Before(list[j].Time)
})

// Determine the max widths for host and tag.
maxHost, maxTag := 10, 6
for _, sn := range list {
@@ -158,6 +207,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
}
}

tab.Footer = fmt.Sprintf("%d snapshots", len(list))

tab.Write(stdout)
}

@@ -165,7 +216,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
type Snapshot struct {
*restic.Snapshot

ID *restic.ID `json:"id"`
ID *restic.ID `json:"id"`
ShortID string `json:"short_id"`
}

// printSnapshotsJSON writes the JSON representation of list to stdout.
@@ -178,6 +230,7 @@ func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
k := Snapshot{
Snapshot: sn,
ID: sn.ID(),
ShortID: sn.ID().Str(),
}
snapshots = append(snapshots, k)
}

@@ -113,7 +113,7 @@ func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
}

if !gopts.NoLock {
Verbosef("Create exclusive lock for repository\n")
Verbosef("create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
@@ -135,9 +135,9 @@ func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
}
}
if changeCnt == 0 {
Verbosef("No snapshots were modified\n")
Verbosef("no snapshots were modified\n")
} else {
Verbosef("Modified tags on %v snapshots\n", changeCnt)
Verbosef("modified tags on %v snapshots\n", changeCnt)
}
return nil
}

@@ -7,13 +7,59 @@ import (
"os"
"path/filepath"
"strings"
"sync"

"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
)

type rejectionCache struct {
m map[string]bool
mtx sync.Mutex
}

// Lock locks the mutex in rc.
func (rc *rejectionCache) Lock() {
if rc != nil {
rc.mtx.Lock()
}
}

// Unlock unlocks the mutex in rc.
func (rc *rejectionCache) Unlock() {
if rc != nil {
rc.mtx.Unlock()
}
}

// Get returns the last stored value for dir and a second boolean that
// indicates whether that value was actually written to the cache. It is the
// callers responsibility to call rc.Lock and rc.Unlock before using this
// method, otherwise data races may occur.
func (rc *rejectionCache) Get(dir string) (bool, bool) {
if rc == nil || rc.m == nil {
return false, false
}
v, ok := rc.m[dir]
return v, ok
}

// Store stores a new value for dir. It is the callers responsibility to call
// rc.Lock and rc.Unlock before using this method, otherwise data races may
// occur.
func (rc *rejectionCache) Store(dir string, rejected bool) {
if rc == nil {
return
}
if rc.m == nil {
rc.m = make(map[string]bool)
}
rc.m[dir] = rejected
}

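Every rejectionCache method above first checks whether the receiver is nil, so callers can pass a nil cache and skip caching entirely (the test change further down does exactly that). A minimal, self-contained sketch of this nil-receiver pattern, not restic code and with locking moved inside the methods for brevity:

    package main

    import (
        "fmt"
        "sync"
    )

    type cache struct {
        m   map[string]bool
        mtx sync.Mutex
    }

    // Store tolerates a nil receiver, so callers never special-case "no cache configured".
    func (c *cache) Store(k string, v bool) {
        if c == nil {
            return
        }
        c.mtx.Lock()
        defer c.mtx.Unlock()
        if c.m == nil {
            c.m = make(map[string]bool)
        }
        c.m[k] = v
    }

    // Get returns the stored value and whether the key was present.
    func (c *cache) Get(k string) (bool, bool) {
        if c == nil {
            return false, false
        }
        c.mtx.Lock()
        defer c.mtx.Unlock()
        v, ok := c.m[k]
        return v, ok
    }

    func main() {
        var c *cache               // nil: caching disabled
        c.Store("/tmp", true)      // no panic, silently ignored
        fmt.Println(c.Get("/tmp")) // prints: false false
    }
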
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
@@ -41,8 +87,10 @@ func rejectByPattern(patterns []string) RejectFunc {
// should be excluded. The RejectFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
// excludeFileSpec in the form "filename[:content]". The returned error is
// non-nil if the filename component of excludeFileSpec is empty.
func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) {
// non-nil if the filename component of excludeFileSpec is empty. If rc is
// non-nil, it is going to be used in the RejectFunc to expedite the evaluation
// of a directory based on previous visits.
func rejectIfPresent(excludeFileSpec string, rc *rejectionCache) (RejectFunc, error) {
if excludeFileSpec == "" {
return nil, errors.New("name for exclusion tagfile is empty")
}
@@ -59,15 +107,17 @@ func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) {
}
debug.Log("using %q as exclusion tagfile", tf)
fn := func(filename string, _ os.FileInfo) bool {
return isExcludedByFile(filename, tf, tc)
return isExcludedByFile(filename, tf, tc, rc)
}
return fn, nil
}

// isExcludedByFile interprets filename as a path and returns true if that file
// is in a excluded directory. A directory is identified as excluded if it contains a
// tagfile which bears the name specified in tagFilename and starts with header.
func isExcludedByFile(filename, tagFilename, header string) bool {
// tagfile which bears the name specified in tagFilename and starts with
// header. If rc is non-nil, it is used to expedite the evaluation of a
// directory based on previous visits.
func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool {
if tagFilename == "" {
return false
}
@@ -75,6 +125,19 @@ func isExcludedByFile(filename, tagFilename, header string) bool {
if base == tagFilename {
return false // do not exclude the tagfile itself
}
rc.Lock()
defer rc.Unlock()

rejected, visited := rc.Get(dir)
if visited {
return rejected
}
rejected = isDirExcludedByFile(dir, tagFilename, header)
rc.Store(dir, rejected)
return rejected
}

func isDirExcludedByFile(dir, tagFilename, header string) bool {
tf := filepath.Join(dir, tagFilename)
_, err := fs.Lstat(tf)
if os.IsNotExist(err) {
@@ -177,3 +240,27 @@ func rejectByDevice(samples []string) (RejectFunc, error) {
panic(fmt.Sprintf("item %v, device id %v not found, allowedDevs: %v", item, id, allowed))
}, nil
}

// rejectResticCache returns a RejectFunc that rejects the restic cache
// directory (if set).
func rejectResticCache(repo *repository.Repository) (RejectFunc, error) {
if repo.Cache == nil {
return func(string, os.FileInfo) bool {
return false
}, nil
}
cacheBase := repo.Cache.BaseDir()

if cacheBase == "" {
return nil, errors.New("cacheBase is empty string")
}

return func(item string, _ os.FileInfo) bool {
if fs.HasPathPrefix(cacheBase, item) {
debug.Log("rejecting restic cache directory %v", item)
return true
}

return false
}, nil
}

@@ -76,7 +76,7 @@ func TestIsExcludedByFile(t *testing.T) {
if tc.content == "" {
h = ""
}
if got := isExcludedByFile(foo, tagFilename, h); tc.want != got {
if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got {
t.Fatalf("expected %v, got %v", tc.want, got)
}
})

cmd/restic/excludes (new file, 31 lines)
@@ -0,0 +1,31 @@
/boot
/dev
/etc
/home
/lost+found
/mnt
/proc
/root
/run
/sys
/tmp
/usr
/var
/opt/android-sdk
/opt/bullet
/opt/dex2jar
/opt/jameica
/opt/google
/opt/JDownloader
/opt/JDownloaderScripts
/opt/opencascade
/opt/vagrant
/opt/visual-studio-code
/opt/vtk6
/bin
/fonts*
/srv/ftp
/srv/http
/sbin
/lib
/lib64

@@ -9,7 +9,9 @@ import (
"runtime"
"strings"
"syscall"
"time"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/azure"
"github.com/restic/restic/internal/backend/b2"
"github.com/restic/restic/internal/backend/gs"
@@ -19,7 +21,9 @@ import (
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/swift"
"github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/limiter"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
@@ -38,6 +42,12 @@ type GlobalOptions struct {
Quiet bool
NoLock bool
JSON bool
CacheDir string
NoCache bool
CACerts []string

LimitUploadKb int
LimitDownloadKb int

ctx context.Context
password string
@@ -68,7 +78,11 @@ func init() {
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")

f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory")
f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
f.StringSliceVar(&globalOptions.CACerts, "cacert", nil, "path to load root certificates from (default: use system certificates)")
f.IntVar(&globalOptions.LimitUploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)")
f.IntVar(&globalOptions.LimitDownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. (default: unlimited)")
f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")

restoreTerminal()
@@ -310,6 +324,15 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, err
}

if opts.LimitUploadKb > 0 || opts.LimitDownloadKb > 0 {
debug.Log("rate limiting backend to %d KiB/s upload and %d KiB/s download", opts.LimitUploadKb, opts.LimitDownloadKb)
be = limiter.LimitBackend(be, limiter.NewStaticLimiter(opts.LimitUploadKb, opts.LimitDownloadKb))
}

be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {
Warnf("%v returned error, retrying after %v: %v\n", msg, d, err)
})

s := repository.New(be)

opts.password, err = ReadPassword(opts, "enter password for repository: ")
@@ -322,6 +345,21 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, err
}

if stdoutIsTerminal() {
Verbosef("password is correct\n")
}

if opts.NoCache {
return s, nil
}

cache, err := cache.New(s.Config().ID, opts.CacheDir)
if err != nil {
Warnf("unable to open cache: %v\n", err)
} else {
s.UseCache(cache)
}

return s, nil
}

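In OpenRepository above, the backend is wrapped in layers: the rate limiter is applied first (when --limit-upload or --limit-download is set) and the retry wrapper is applied on top of it, so retried requests are throttled as well; the cache is attached to the repository afterwards unless --no-cache is given. A toy sketch of that decorator layering (hypothetical interface and types, not restic's own):

    // Backend is a stand-in for restic's backend interface.
    type Backend interface {
        Save(name string, data []byte) error
    }

    type limited struct{ Backend }  // would throttle calls to the wrapped backend
    type retrying struct{ Backend } // would retry failed calls on the wrapped backend

    func wrapBackend(be Backend) Backend {
        be = limited{be}  // innermost: closest to the real backend
        be = retrying{be} // outermost: retried requests still pass through the limiter
        return be
    }
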
@@ -466,23 +504,28 @@ func open(s string, opts options.Options) (restic.Backend, error) {
return nil, err
}

rt, err := backend.Transport(globalOptions.CACerts)
if err != nil {
return nil, err
}

switch loc.Scheme {
case "local":
be, err = local.Open(cfg.(local.Config))
case "sftp":
be, err = sftp.Open(cfg.(sftp.Config))
be, err = sftp.Open(cfg.(sftp.Config), SuspendSignalHandler, InstallSignalHandler)
case "s3":
be, err = s3.Open(cfg.(s3.Config))
be, err = s3.Open(cfg.(s3.Config), rt)
case "gs":
be, err = gs.Open(cfg.(gs.Config))
case "azure":
be, err = azure.Open(cfg.(azure.Config))
be, err = azure.Open(cfg.(azure.Config), rt)
case "swift":
be, err = swift.Open(cfg.(swift.Config))
be, err = swift.Open(cfg.(swift.Config), rt)
case "b2":
be, err = b2.Open(cfg.(b2.Config))
be, err = b2.Open(cfg.(b2.Config), rt)
case "rest":
be, err = rest.Open(cfg.(rest.Config))
be, err = rest.Open(cfg.(rest.Config), rt)

default:
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
@@ -518,23 +561,28 @@ func create(s string, opts options.Options) (restic.Backend, error) {
return nil, err
}

rt, err := backend.Transport(globalOptions.CACerts)
if err != nil {
return nil, err
}

switch loc.Scheme {
case "local":
return local.Create(cfg.(local.Config))
case "sftp":
return sftp.Create(cfg.(sftp.Config))
return sftp.Create(cfg.(sftp.Config), SuspendSignalHandler, InstallSignalHandler)
case "s3":
return s3.Create(cfg.(s3.Config))
return s3.Create(cfg.(s3.Config), rt)
case "gs":
return gs.Create(cfg.(gs.Config))
case "azure":
return azure.Create(cfg.(azure.Config))
return azure.Create(cfg.(azure.Config), rt)
case "swift":
return swift.Open(cfg.(swift.Config))
return swift.Open(cfg.(swift.Config), rt)
case "b2":
return b2.Create(cfg.(b2.Config))
return b2.Create(cfg.(b2.Config), rt)
case "rest":
return rest.Create(cfg.(rest.Config))
return rest.Create(cfg.(rest.Config), rt)
}

debug.Log("invalid repository scheme: %v", s)

@@ -13,7 +13,7 @@ import (

"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
. "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)

const (
@@ -56,7 +56,7 @@ func waitForMount(t testing.TB, dir string) {

func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
opts := MountOptions{}
OK(t, runMount(opts, gopts, []string{dir}))
rtest.OK(t, runMount(opts, gopts, []string{dir}))
}

func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
@@ -75,14 +75,14 @@ func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {

func listSnapshots(t testing.TB, dir string) []string {
snapshotsDir, err := os.Open(filepath.Join(dir, "snapshots"))
OK(t, err)
rtest.OK(t, err)
names, err := snapshotsDir.Readdirnames(-1)
OK(t, err)
OK(t, snapshotsDir.Close())
rtest.OK(t, err)
rtest.OK(t, snapshotsDir.Close())
return names
}

func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)

go testRunMount(t, global, mountpoint)
@@ -98,18 +98,28 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit

namesInSnapshots := listSnapshots(t, mountpoint)
t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots)
Assert(t,
len(namesInSnapshots) == len(snapshotIDs),
"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))
rtest.Assert(t,
expectedSnapshotsInFuseDir == len(namesInSnapshots),
"Invalid number of snapshots: expected %d, got %d", expectedSnapshotsInFuseDir, len(namesInSnapshots))

namesMap := make(map[string]bool)
for _, name := range namesInSnapshots {
namesMap[name] = false
}

// Is "latest" present?
if len(namesMap) != 0 {
_, ok := namesMap["latest"]
if !ok {
t.Errorf("Symlink latest isn't present in fuse dir")
} else {
namesMap["latest"] = true
}
}

for _, id := range snapshotIDs {
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
OK(t, err)
rtest.OK(t, err)

ts := snapshot.Time.Format(time.RFC3339)
present, ok := namesMap[ts]
@@ -133,12 +143,12 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
}

for name, present := range namesMap {
Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
rtest.Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
}
}

func TestMount(t *testing.T) {
if !RunFuseTest {
if !rtest.RunFuseTest {
t.Skip("Skipping fuse tests")
}

@@ -148,53 +158,53 @@ func TestMount(t *testing.T) {
testRunInit(t, env.gopts)

repo, err := OpenRepository(env.gopts)
OK(t, err)
rtest.OK(t, err)

// We remove the mountpoint now to check that cmdMount creates it
RemoveAll(t, env.mountpoint)
rtest.RemoveAll(t, env.mountpoint)

checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{})
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0)

SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))

// first backup
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 1,
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)

checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2)

// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 2,
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)

checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3)

// third backup, explicit incremental
bopts := BackupOptions{Parent: snapshotIDs[0].String()}
testRunBackup(t, []string{env.testdata}, bopts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 3,
rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)

checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4)
}

func TestMountSameTimestamps(t *testing.T) {
if !RunFuseTest {
if !rtest.RunFuseTest {
t.Skip("Skipping fuse tests")
}

env, cleanup := withTestEnvironment(t)
defer cleanup()

SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))

repo, err := OpenRepository(env.gopts)
OK(t, err)
rtest.OK(t, err)

ids := []restic.ID{
restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
@@ -202,5 +212,5 @@ func TestMountSameTimestamps(t *testing.T) {
restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
}

checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4)
}

@@ -11,7 +11,7 @@ import (

"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
. "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)

type dirEntry struct {
@@ -71,7 +71,17 @@ func sameModTime(fi1, fi2 os.FileInfo) bool {
}
}

return fi1.ModTime().Equal(fi2.ModTime())
same := fi1.ModTime().Equal(fi2.ModTime())
if !same && (runtime.GOOS == "darwin" || runtime.GOOS == "openbsd") {
// Allow up to 1μs difference, because macOS <10.13 cannot restore
// with nanosecond precision and the current version of Go (1.9.2)
// does not yet support the new syscall. (#1087)
mt1 := fi1.ModTime()
mt2 := fi2.ModTime()
usecDiff := (mt1.Nanosecond()-mt2.Nanosecond())/1000 + (mt1.Second()-mt2.Second())*1000000
same = usecDiff <= 1 && usecDiff >= -1
}
return same
}

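The microsecond tolerance above is computed from the second and nanosecond components separately, which also works across a second boundary. A small hypothetical check of the same arithmetic (a fragment, assuming fmt and time are imported):

    mt1 := time.Date(2017, 1, 1, 0, 0, 10, 999999900, time.UTC)
    mt2 := time.Date(2017, 1, 1, 0, 0, 11, 100, time.UTC)
    usecDiff := (mt1.Nanosecond()-mt2.Nanosecond())/1000 + (mt1.Second()-mt2.Second())*1000000
    fmt.Println(usecDiff) // -1, i.e. within the ±1µs tolerance
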
// directoriesEqualContents checks if both directories contain exactly the same
@@ -174,14 +184,14 @@ type testEnvironment struct {
// withTestEnvironment creates a test environment and returns a cleanup
// function which removes it.
func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
if !RunIntegrationTest {
if !rtest.RunIntegrationTest {
t.Skip("integration tests disabled")
}

repository.TestUseLowSecurityKDFParameters(t)

tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
OK(t, err)
tempdir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-")
rtest.OK(t, err)

env = &testEnvironment{
base: tempdir,
@@ -191,16 +201,17 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
mountpoint: filepath.Join(tempdir, "mount"),
}

OK(t, os.MkdirAll(env.mountpoint, 0700))
OK(t, os.MkdirAll(env.testdata, 0700))
OK(t, os.MkdirAll(env.cache, 0700))
OK(t, os.MkdirAll(env.repo, 0700))
rtest.OK(t, os.MkdirAll(env.mountpoint, 0700))
rtest.OK(t, os.MkdirAll(env.testdata, 0700))
rtest.OK(t, os.MkdirAll(env.cache, 0700))
rtest.OK(t, os.MkdirAll(env.repo, 0700))

env.gopts = GlobalOptions{
Repo: env.repo,
Quiet: true,
CacheDir: env.cache,
ctx: context.Background(),
password: TestPassword,
password: rtest.TestPassword,
stdout: os.Stdout,
stderr: os.Stderr,
extended: make(options.Options),
@@ -210,11 +221,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
globalOptions = env.gopts

cleanup = func() {
if !TestCleanupTempDirs {
if !rtest.TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir)
return
}
RemoveAll(t, tempdir)
rtest.RemoveAll(t, tempdir)
}

return env, cleanup

@@ -17,13 +17,12 @@ import (
"testing"
"time"

"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"

"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/repository"
. "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)

func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
@@ -47,13 +46,13 @@ func testRunInit(t testing.TB, opts GlobalOptions) {
repository.TestUseLowSecurityKDFParameters(t)
restic.TestSetLockTimeout(t, 0)

OK(t, runInit(opts, nil))
rtest.OK(t, runInit(opts, nil))
t.Logf("repository initialized at %v", opts.Repo)
}

func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) {
t.Logf("backing up %v", target)
OK(t, runBackup(opts, gopts, target))
rtest.OK(t, runBackup(opts, gopts, target))
}

func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
@@ -63,7 +62,7 @@ func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
globalOptions.stdout = os.Stdout
}()

OK(t, runList(opts, []string{tpe}))
rtest.OK(t, runList(opts, []string{tpe}))
return parseIDsFromReader(t, buf)
}

@@ -78,7 +77,7 @@ func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths [
Paths: paths,
}

OK(t, runRestore(opts, gopts, []string{"latest"}))
rtest.OK(t, runRestore(opts, gopts, []string{"latest"}))
}

func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
@@ -87,7 +86,7 @@ func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snaps
Exclude: excludes,
}

OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}

func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
@@ -96,7 +95,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps
Include: includes,
}

OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}

func testRunCheck(t testing.TB, gopts GlobalOptions) {
@@ -104,7 +103,7 @@ func testRunCheck(t testing.TB, gopts GlobalOptions) {
ReadData: true,
CheckUnused: true,
}
OK(t, runCheck(opts, gopts, nil))
rtest.OK(t, runCheck(opts, gopts, nil))
}

func testRunCheckOutput(gopts GlobalOptions) (string, error) {
@@ -129,7 +128,7 @@ func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
globalOptions.stdout = os.Stdout
}()

OK(t, runRebuildIndex(gopts))
rtest.OK(t, runRebuildIndex(gopts))
}

func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
@@ -144,7 +143,7 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {

opts := LsOptions{}

OK(t, runLs(opts, gopts, []string{snapshotID}))
rtest.OK(t, runLs(opts, gopts, []string{snapshotID}))

return strings.Split(string(buf.Bytes()), "\n")
}
@@ -160,7 +159,7 @@ func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern strin

opts := FindOptions{}

OK(t, runFind(opts, gopts, []string{pattern}))
rtest.OK(t, runFind(opts, gopts, []string{pattern}))

return buf.Bytes()
}
@@ -176,10 +175,10 @@ func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snap

opts := SnapshotOptions{}

OK(t, runSnapshots(opts, globalOptions, []string{}))
rtest.OK(t, runSnapshots(opts, globalOptions, []string{}))

snapshots := []Snapshot{}
OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))

snapmap = make(map[restic.ID]Snapshot, len(snapshots))
for _, sn := range snapshots {
@@ -193,11 +192,11 @@ func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snap

func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
opts := ForgetOptions{}
OK(t, runForget(opts, gopts, args))
rtest.OK(t, runForget(opts, gopts, args))
}

func testRunPrune(t testing.TB, gopts GlobalOptions) {
OK(t, runPrune(gopts))
rtest.OK(t, runPrune(gopts))
}

func TestBackup(t *testing.T) {

@@ -210,18 +209,18 @@ func TestBackup(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

testRunInit(t, env.gopts)

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}

// first backup
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 1,
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)

testRunCheck(t, env.gopts)
@@ -230,7 +229,7 @@ func TestBackup(t *testing.T) {
// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 2,
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)

stat2 := dirStats(env.repo)
@@ -244,7 +243,7 @@ func TestBackup(t *testing.T) {
opts.Parent = snapshotIDs[0].String()
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshotIDs) == 3,
rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)

stat3 := dirStats(env.repo)
@@ -258,7 +257,7 @@ func TestBackup(t *testing.T) {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
"directories are not equal")
}

@@ -275,10 +274,10 @@ func TestBackupNonExistingFile(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)

testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
@@ -309,10 +308,10 @@ func TestBackupMissingFile1(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)

testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
@@ -331,7 +330,7 @@ func TestBackupMissingFile1(t *testing.T) {
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true

OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})

opts := BackupOptions{}
@@ -339,7 +338,7 @@ func TestBackupMissingFile1(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)

Assert(t, ranHook, "hook did not run")
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk1")
}

@@ -353,10 +352,10 @@ func TestBackupMissingFile2(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)

testRunInit(t, env.gopts)

@@ -376,7 +375,7 @@ func TestBackupMissingFile2(t *testing.T) {
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true

OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})

opts := BackupOptions{}
@@ -384,7 +383,7 @@ func TestBackupMissingFile2(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)

Assert(t, ranHook, "hook did not run")
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")
}

@@ -398,10 +397,10 @@ func TestBackupChangedFile(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)

testRunInit(t, env.gopts)

@@ -423,7 +422,7 @@ func TestBackupChangedFile(t *testing.T) {
t.Logf("in hook, modifying test file %v", modFile)
ranHook = true

OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
rtest.OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
})

opts := BackupOptions{}
@@ -431,7 +430,7 @@ func TestBackupChangedFile(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)

Assert(t, ranHook, "hook did not run")
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("archiver.SaveFile")
}

@@ -445,10 +444,10 @@ func TestBackupDirectoryError(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
rtest.OK(t, err)
rtest.OK(t, fd.Close())

SetupTarTestFixture(t, env.testdata, datafile)
rtest.SetupTarTestFixture(t, env.testdata, datafile)

testRunInit(t, env.gopts)

@@ -472,22 +471,22 @@ func TestBackupDirectoryError(t *testing.T) {
t.Logf("in hook, removing test file %v", testdir)
ranHook = true

OK(t, os.RemoveAll(testdir))
rtest.OK(t, os.RemoveAll(testdir))
})

testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)

Assert(t, ranHook, "hook did not run")
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")

snapshots := testRunList(t, "snapshots", env.gopts)
Assert(t, len(snapshots) > 0,
rtest.Assert(t, len(snapshots) > 0,
"no snapshots found in repo (%v)", datafile)

files := testRunLs(t, env.gopts, snapshots[0].String())

Assert(t, len(files) > 1, "snapshot is empty")
rtest.Assert(t, len(files) > 1, "snapshot is empty")
}

func includes(haystack []string, needle string) bool {
@@ -539,13 +538,13 @@ func TestBackupExclude(t *testing.T) {

for _, filename := range backupExcludeFilenames {
fp := filepath.Join(datadir, filename)
OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))

f, err := os.Create(fp)
OK(t, err)
rtest.OK(t, err)

fmt.Fprintf(f, filename)
OK(t, f.Close())
rtest.OK(t, f.Close())
}

snapshots := make(map[string]struct{})
@@ -555,23 +554,23 @@ func TestBackupExclude(t *testing.T) {
testRunBackup(t, []string{datadir}, opts, env.gopts)
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files := testRunLs(t, env.gopts, snapshotID)
Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")

opts.Excludes = []string{"*.tar.gz"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")

opts.Excludes = []string{"*.tar.gz", "private/secret"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
"expected file %q not in first snapshot, but it's included", "passwords.txt")
}

@@ -612,7 +611,7 @@ func TestIncrementalBackup(t *testing.T) {
|
||||
datadir := filepath.Join(env.base, "testdata")
|
||||
testfile := filepath.Join(datadir, "testfile")
|
||||
|
||||
OK(t, appendRandomData(testfile, incrementalFirstWrite))
|
||||
rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
|
||||
|
||||
opts := BackupOptions{}
|
||||
|
||||
@@ -620,7 +619,7 @@ func TestIncrementalBackup(t *testing.T) {
|
||||
testRunCheck(t, env.gopts)
|
||||
stat1 := dirStats(env.repo)
|
||||
|
||||
OK(t, appendRandomData(testfile, incrementalSecondWrite))
|
||||
rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
|
||||
|
||||
testRunBackup(t, []string{datadir}, opts, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
@@ -630,7 +629,7 @@ func TestIncrementalBackup(t *testing.T) {
|
||||
}
|
||||
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
|
||||
|
||||
OK(t, appendRandomData(testfile, incrementalThirdWrite))
|
||||
rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
|
||||
|
||||
testRunBackup(t, []string{datadir}, opts, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
@@ -647,28 +646,32 @@ func TestBackupTags(t *testing.T) {
|
||||
|
||||
datafile := filepath.Join("testdata", "backup-data.tar.gz")
|
||||
testRunInit(t, env.gopts)
|
||||
SetupTarTestFixture(t, env.testdata, datafile)
|
||||
rtest.SetupTarTestFixture(t, env.testdata, datafile)
|
||||
|
||||
opts := BackupOptions{}
|
||||
|
||||
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
newest, _ := testRunSnapshots(t, env.gopts)
|
||||
Assert(t, newest != nil, "expected a new backup, got nil")
|
||||
Assert(t, len(newest.Tags) == 0,
|
||||
rtest.Assert(t, newest != nil, "expected a new backup, got nil")
|
||||
rtest.Assert(t, len(newest.Tags) == 0,
|
||||
"expected no tags, got %v", newest.Tags)
|
||||
parent := newest
|
||||
|
||||
opts.Tags = []string{"NL"}
|
||||
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
newest, _ = testRunSnapshots(t, env.gopts)
|
||||
Assert(t, newest != nil, "expected a new backup, got nil")
|
||||
Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
|
||||
rtest.Assert(t, newest != nil, "expected a new backup, got nil")
|
||||
rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
|
||||
"expected one NL tag, got %v", newest.Tags)
|
||||
// Tagged backup should have untagged backup as parent.
|
||||
rtest.Assert(t, parent.ID.Equal(*newest.Parent),
|
||||
"expected parent to be %v, got %v", parent.ID, newest.Parent)
|
||||
}
|
||||
|
||||
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
|
||||
OK(t, runTag(opts, gopts, []string{}))
|
||||
rtest.OK(t, runTag(opts, gopts, []string{}))
|
||||
}
|
||||
|
||||
func TestTag(t *testing.T) {
|
||||
@@ -677,68 +680,68 @@ func TestTag(t *testing.T) {
|
||||
|
||||
datafile := filepath.Join("testdata", "backup-data.tar.gz")
|
||||
testRunInit(t, env.gopts)
|
||||
SetupTarTestFixture(t, env.testdata, datafile)
|
||||
rtest.SetupTarTestFixture(t, env.testdata, datafile)
|
||||
|
||||
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
newest, _ := testRunSnapshots(t, env.gopts)
|
||||
Assert(t, newest != nil, "expected a new backup, got nil")
|
||||
-    Assert(t, len(newest.Tags) == 0,
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 0,
         "expected no tags, got %v", newest.Tags)
-    Assert(t, newest.Original == nil,
+    rtest.Assert(t, newest.Original == nil,
         "expected original ID to be nil, got %v", newest.Original)
     originalID := *newest.ID

     testRunTag(t, TagOptions{SetTags: []string{"NL"}}, env.gopts)
     testRunCheck(t, env.gopts)
     newest, _ = testRunSnapshots(t, env.gopts)
-    Assert(t, newest != nil, "expected a new backup, got nil")
-    Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
         "set failed, expected one NL tag, got %v", newest.Tags)
-    Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
-    Assert(t, *newest.Original == originalID,
+    rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+    rtest.Assert(t, *newest.Original == originalID,
         "expected original ID to be set to the first snapshot id")

     testRunTag(t, TagOptions{AddTags: []string{"CH"}}, env.gopts)
     testRunCheck(t, env.gopts)
     newest, _ = testRunSnapshots(t, env.gopts)
-    Assert(t, newest != nil, "expected a new backup, got nil")
-    Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
         "add failed, expected CH,NL tags, got %v", newest.Tags)
-    Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
-    Assert(t, *newest.Original == originalID,
+    rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+    rtest.Assert(t, *newest.Original == originalID,
         "expected original ID to be set to the first snapshot id")

     testRunTag(t, TagOptions{RemoveTags: []string{"NL"}}, env.gopts)
     testRunCheck(t, env.gopts)
     newest, _ = testRunSnapshots(t, env.gopts)
-    Assert(t, newest != nil, "expected a new backup, got nil")
-    Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
         "remove failed, expected one CH tag, got %v", newest.Tags)
-    Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
-    Assert(t, *newest.Original == originalID,
+    rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+    rtest.Assert(t, *newest.Original == originalID,
         "expected original ID to be set to the first snapshot id")

     testRunTag(t, TagOptions{AddTags: []string{"US", "RU"}}, env.gopts)
     testRunTag(t, TagOptions{RemoveTags: []string{"CH", "US", "RU"}}, env.gopts)
     testRunCheck(t, env.gopts)
     newest, _ = testRunSnapshots(t, env.gopts)
-    Assert(t, newest != nil, "expected a new backup, got nil")
-    Assert(t, len(newest.Tags) == 0,
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 0,
         "expected no tags, got %v", newest.Tags)
-    Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
-    Assert(t, *newest.Original == originalID,
+    rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+    rtest.Assert(t, *newest.Original == originalID,
         "expected original ID to be set to the first snapshot id")

     // Check special case of removing all tags.
     testRunTag(t, TagOptions{SetTags: []string{""}}, env.gopts)
     testRunCheck(t, env.gopts)
     newest, _ = testRunSnapshots(t, env.gopts)
-    Assert(t, newest != nil, "expected a new backup, got nil")
-    Assert(t, len(newest.Tags) == 0,
+    rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+    rtest.Assert(t, len(newest.Tags) == 0,
         "expected no tags, got %v", newest.Tags)
-    Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
-    Assert(t, *newest.Original == originalID,
+    rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+    rtest.Assert(t, *newest.Original == originalID,
         "expected original ID to be set to the first snapshot id")
 }
@@ -750,7 +753,7 @@ func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
         globalOptions.stdout = os.Stdout
     }()

-    OK(t, runKey(gopts, []string{"list"}))
+    rtest.OK(t, runKey(gopts, []string{"list"}))

     scanner := bufio.NewScanner(buf)
     exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
@@ -771,7 +774,7 @@ func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions)
         testKeyNewPassword = ""
     }()

-    OK(t, runKey(gopts, []string{"add"}))
+    rtest.OK(t, runKey(gopts, []string{"add"}))
 }

 func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
@@ -780,13 +783,13 @@ func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
         testKeyNewPassword = ""
     }()

-    OK(t, runKey(gopts, []string{"passwd"}))
+    rtest.OK(t, runKey(gopts, []string{"passwd"}))
 }

 func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
     t.Logf("remove %d keys: %q\n", len(IDs), IDs)
     for _, id := range IDs {
-        OK(t, runKey(gopts, []string{"remove", id}))
+        rtest.OK(t, runKey(gopts, []string{"remove", id}))
     }
 }

@@ -814,7 +817,7 @@ func TestKeyAddRemove(t *testing.T) {

     env.gopts.password = passwordList[len(passwordList)-1]
     t.Logf("testing access with last password %q\n", env.gopts.password)
-    OK(t, runKey(env.gopts, []string{"list"}))
+    rtest.OK(t, runKey(env.gopts, []string{"list"}))
     testRunCheck(t, env.gopts)
 }
@@ -847,10 +850,10 @@ func TestRestoreFilter(t *testing.T) {

     testRunInit(t, env.gopts)

-    for _, test := range testfiles {
-        p := filepath.Join(env.testdata, test.name)
-        OK(t, os.MkdirAll(filepath.Dir(p), 0755))
-        OK(t, appendRandomData(p, test.size))
+    for _, testFile := range testfiles {
+        p := filepath.Join(env.testdata, testFile.name)
+        rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+        rtest.OK(t, appendRandomData(p, testFile.size))
     }

     opts := BackupOptions{}
@@ -862,20 +865,20 @@ func TestRestoreFilter(t *testing.T) {

     // no restore filter should restore all files
     testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
-    for _, test := range testfiles {
-        OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", test.name), int64(test.size)))
+    for _, testFile := range testfiles {
+        rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
     }

     for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
         base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
         testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
-        for _, test := range testfiles {
-            err := testFileSize(filepath.Join(base, "testdata", test.name), int64(test.size))
-            if ok, _ := filter.Match(pat, filepath.Base(test.name)); !ok {
-                OK(t, err)
+        for _, testFile := range testfiles {
+            err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
+            if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
+                rtest.OK(t, err)
             } else {
-                Assert(t, os.IsNotExist(errors.Cause(err)),
-                    "expected %v to not exist in restore step %v, but it exists, err %v", test.name, i+1, err)
+                rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
+                    "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
             }
         }
     }
@@ -889,8 +892,8 @@ func TestRestore(t *testing.T) {

     for i := 0; i < 10; i++ {
         p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
-        OK(t, os.MkdirAll(filepath.Dir(p), 0755))
-        OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
+        rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+        rtest.OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
     }

     opts := BackupOptions{}
@@ -902,7 +905,7 @@ func TestRestore(t *testing.T) {
     restoredir := filepath.Join(env.base, "restore")
     testRunRestoreLatest(t, env.gopts, restoredir, nil, "")

-    Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
+    rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
         "directories are not equal")
 }

@@ -913,8 +916,8 @@ func TestRestoreLatest(t *testing.T) {
     testRunInit(t, env.gopts)

     p := filepath.Join(env.testdata, "testfile.c")
-    OK(t, os.MkdirAll(filepath.Dir(p), 0755))
-    OK(t, appendRandomData(p, 100))
+    rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+    rtest.OK(t, appendRandomData(p, 100))

     opts := BackupOptions{}

@@ -922,24 +925,24 @@ func TestRestoreLatest(t *testing.T) {
     testRunCheck(t, env.gopts)

     os.Remove(p)
-    OK(t, appendRandomData(p, 101))
+    rtest.OK(t, appendRandomData(p, 101))
     testRunBackup(t, []string{env.testdata}, opts, env.gopts)
     testRunCheck(t, env.gopts)

     // Restore latest without any filters
     testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, "")
-    OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
+    rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))

     // Setup test files in different directories backed up in different snapshots
     p1 := filepath.Join(env.testdata, "p1/testfile.c")
-    OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
-    OK(t, appendRandomData(p1, 102))
+    rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
+    rtest.OK(t, appendRandomData(p1, 102))
     testRunBackup(t, []string{filepath.Dir(p1)}, opts, env.gopts)
     testRunCheck(t, env.gopts)

     p2 := filepath.Join(env.testdata, "p2/testfile.c")
-    OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
-    OK(t, appendRandomData(p2, 103))
+    rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
+    rtest.OK(t, appendRandomData(p2, 103))
     testRunBackup(t, []string{filepath.Dir(p2)}, opts, env.gopts)
     testRunCheck(t, env.gopts)

@@ -947,16 +950,16 @@ func TestRestoreLatest(t *testing.T) {
     p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")

     testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
-    OK(t, testFileSize(p1rAbs, int64(102)))
+    rtest.OK(t, testFileSize(p1rAbs, int64(102)))
     if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) {
-        Assert(t, os.IsNotExist(errors.Cause(err)),
+        rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
             "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
     }

     testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
-    OK(t, testFileSize(p2rAbs, int64(103)))
+    rtest.OK(t, testFileSize(p2rAbs, int64(103)))
     if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) {
-        Assert(t, os.IsNotExist(errors.Cause(err)),
+        rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
             "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
     }
 }
@@ -966,10 +969,10 @@ func TestRestoreWithPermissionFailure(t *testing.T) {
     defer cleanup()

     datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
-    SetupTarTestFixture(t, env.base, datafile)
+    rtest.SetupTarTestFixture(t, env.base, datafile)

     snapshots := testRunList(t, "snapshots", env.gopts)
-    Assert(t, len(snapshots) > 0,
+    rtest.Assert(t, len(snapshots) > 0,
         "no snapshots found in repo (%v)", datafile)

     globalOptions.stderr = ioutil.Discard
@@ -984,9 +987,9 @@ func TestRestoreWithPermissionFailure(t *testing.T) {
     files := testRunLs(t, env.gopts, snapshots[0].String())
     for _, filename := range files {
         fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
-        OK(t, err)
+        rtest.OK(t, err)

-        Assert(t, !isFile(fi) || fi.Size() > 0,
+        rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
             "file %v restored, but filesize is 0", filename)
     }
 }
@@ -1007,9 +1010,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
     testRunInit(t, env.gopts)

     p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
-    OK(t, os.MkdirAll(filepath.Dir(p), 0755))
-    OK(t, appendRandomData(p, 200))
-    OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
+    rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+    rtest.OK(t, appendRandomData(p, 200))
+    rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))

     opts := BackupOptions{}

@@ -1025,9 +1028,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {

     f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
     fi, err := os.Stat(f1)
-    OK(t, err)
+    rtest.OK(t, err)

-    Assert(t, fi.ModTime() != time.Unix(0, 0),
+    rtest.Assert(t, fi.ModTime() != time.Unix(0, 0),
         "meta data of intermediate directory has been restore although it was ignored")

     // restore with filter "*", this should restore meta data on everything.
@@ -1035,9 +1038,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {

     f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
     fi, err = os.Stat(f2)
-    OK(t, err)
+    rtest.OK(t, err)

-    Assert(t, fi.ModTime() == time.Unix(0, 0),
+    rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
         "meta data of intermediate directory hasn't been restore")
 }
@@ -1047,7 +1050,7 @@ func TestFind(t *testing.T) {

     datafile := filepath.Join("testdata", "backup-data.tar.gz")
     testRunInit(t, env.gopts)
-    SetupTarTestFixture(t, env.testdata, datafile)
+    rtest.SetupTarTestFixture(t, env.testdata, datafile)

     opts := BackupOptions{}

@@ -1055,15 +1058,15 @@ func TestFind(t *testing.T) {
     testRunCheck(t, env.gopts)

     results := testRunFind(t, false, env.gopts, "unexistingfile")
-    Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+    rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)

     results = testRunFind(t, false, env.gopts, "testfile")
     lines := strings.Split(string(results), "\n")
-    Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+    rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)

     results = testRunFind(t, false, env.gopts, "testfile*")
     lines = strings.Split(string(results), "\n")
-    Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+    rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
 }

 type testMatch struct {
@@ -1087,7 +1090,7 @@ func TestFindJSON(t *testing.T) {

     datafile := filepath.Join("testdata", "backup-data.tar.gz")
     testRunInit(t, env.gopts)
-    SetupTarTestFixture(t, env.testdata, datafile)
+    rtest.SetupTarTestFixture(t, env.testdata, datafile)

     opts := BackupOptions{}

@@ -1096,20 +1099,20 @@ func TestFindJSON(t *testing.T) {

     results := testRunFind(t, true, env.gopts, "unexistingfile")
     matches := []testMatches{}
-    OK(t, json.Unmarshal(results, &matches))
-    Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+    rtest.OK(t, json.Unmarshal(results, &matches))
+    rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)

     results = testRunFind(t, true, env.gopts, "testfile")
-    OK(t, json.Unmarshal(results, &matches))
-    Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
-    Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
-    Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+    rtest.OK(t, json.Unmarshal(results, &matches))
+    rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+    rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+    rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)

     results = testRunFind(t, true, env.gopts, "testfile*")
-    OK(t, json.Unmarshal(results, &matches))
-    Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
-    Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
-    Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+    rtest.OK(t, json.Unmarshal(results, &matches))
+    rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+    rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+    rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
 }
 func TestRebuildIndex(t *testing.T) {
@@ -1117,7 +1120,7 @@ func TestRebuildIndex(t *testing.T) {
     defer cleanup()

     datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
-    SetupTarTestFixture(t, env.base, datafile)
+    rtest.SetupTarTestFixture(t, env.base, datafile)

     out, err := testRunCheckOutput(env.gopts)
     if !strings.Contains(out, "contained in several indexes") {
@@ -1154,7 +1157,7 @@ func TestCheckRestoreNoLock(t *testing.T) {
     defer cleanup()

     datafile := filepath.Join("testdata", "small-repo.tar.gz")
-    SetupTarTestFixture(t, env.base, datafile)
+    rtest.SetupTarTestFixture(t, env.base, datafile)

     err := filepath.Walk(env.repo, func(p string, fi os.FileInfo, e error) error {
         if e != nil {
@@ -1162,7 +1165,7 @@ func TestCheckRestoreNoLock(t *testing.T) {
         }
         return os.Chmod(p, fi.Mode() & ^(os.FileMode(0222)))
     })
-    OK(t, err)
+    rtest.OK(t, err)

     env.gopts.NoLock = true

@@ -1186,24 +1189,24 @@ func TestPrune(t *testing.T) {
         t.Skipf("unable to find data file %q, skipping", datafile)
         return
     }
-    OK(t, err)
-    OK(t, fd.Close())
+    rtest.OK(t, err)
+    rtest.OK(t, fd.Close())

     testRunInit(t, env.gopts)

-    SetupTarTestFixture(t, env.testdata, datafile)
+    rtest.SetupTarTestFixture(t, env.testdata, datafile)
     opts := BackupOptions{}

     testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, opts, env.gopts)
     firstSnapshot := testRunList(t, "snapshots", env.gopts)
-    Assert(t, len(firstSnapshot) == 1,
+    rtest.Assert(t, len(firstSnapshot) == 1,
         "expected one snapshot, got %v", firstSnapshot)

     testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "2")}, opts, env.gopts)
     testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "3")}, opts, env.gopts)

     snapshotIDs := testRunList(t, "snapshots", env.gopts)
-    Assert(t, len(snapshotIDs) == 3,
+    rtest.Assert(t, len(snapshotIDs) == 3,
         "expected 3 snapshot, got %v", snapshotIDs)

     testRunForget(t, env.gopts, firstSnapshot[0].String())
@@ -1222,12 +1225,12 @@ func TestHardLink(t *testing.T) {
         t.Skipf("unable to find data file %q, skipping", datafile)
         return
     }
-    OK(t, err)
-    OK(t, fd.Close())
+    rtest.OK(t, err)
+    rtest.OK(t, fd.Close())

     testRunInit(t, env.gopts)

-    SetupTarTestFixture(t, env.testdata, datafile)
+    rtest.SetupTarTestFixture(t, env.testdata, datafile)

     linkTests := createFileSetPerHardlink(env.testdata)

@@ -1236,7 +1239,7 @@ func TestHardLink(t *testing.T) {
     // first backup
     testRunBackup(t, []string{env.testdata}, opts, env.gopts)
     snapshotIDs := testRunList(t, "snapshots", env.gopts)
-    Assert(t, len(snapshotIDs) == 1,
+    rtest.Assert(t, len(snapshotIDs) == 1,
         "expected one snapshot, got %v", snapshotIDs)

     testRunCheck(t, env.gopts)
@@ -1246,11 +1249,11 @@ func TestHardLink(t *testing.T) {
         restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
         t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
         testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
-        Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
+        rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
             "directories are not equal")

         linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
-        Assert(t, linksEqual(linkTests, linkResults),
+        rtest.Assert(t, linksEqual(linkTests, linkResults),
             "links are not equal")
     }
@@ -4,7 +4,7 @@ import (
     "path/filepath"
     "testing"

-    . "github.com/restic/restic/internal/test"
+    rtest "github.com/restic/restic/internal/test"
 )

 func TestRestoreLocalLayout(t *testing.T) {
@@ -24,7 +24,7 @@ func TestRestoreLocalLayout(t *testing.T) {
     for _, test := range tests {
         datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)

-        SetupTarTestFixture(t, env.base, datafile)
+        rtest.SetupTarTestFixture(t, env.base, datafile)

         env.gopts.extended["local.layout"] = test.layout

@@ -35,7 +35,7 @@ func TestRestoreLocalLayout(t *testing.T) {
         target := filepath.Join(env.base, "restore")
         testRunRestoreLatest(t, env.gopts, target, nil, "")

-        RemoveAll(t, filepath.Join(env.base, "repo"))
-        RemoveAll(t, target)
+        rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
+        rtest.RemoveAll(t, target)
     }
 }
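The import change above is what drives the long series of mechanical edits earlier in this diff: the dot import of the internal test package becomes a named import, so the helpers are called as rtest.OK and rtest.Assert instead of bare OK and Assert. A minimal sketch of the two styles; the test body is invented for illustration and it only builds inside the restic module:

// Before the change: a dot import leaks the helper names into the file scope.
//
//    import . "github.com/restic/restic/internal/test"
//
//    OK(t, err)
//    Assert(t, ok, "expected %v", want)

// After the change: a named import keeps the origin of every helper visible.
package main_test

import (
    "testing"

    rtest "github.com/restic/restic/internal/test"
)

func TestNamedImportSketch(t *testing.T) {
    var err error // stand-in for the result of a real operation
    rtest.OK(t, err)
    rtest.Assert(t, err == nil, "expected no error, got %v", err)
}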
@@ -8,6 +8,7 @@ import (
     "time"

     "github.com/restic/restic/internal/debug"
+    "github.com/restic/restic/internal/errors"
     "github.com/restic/restic/internal/repository"
     "github.com/restic/restic/internal/restic"
 )
@@ -35,7 +36,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,

     lock, err := lockFn(context.TODO(), repo)
     if err != nil {
-        return nil, err
+        return nil, errors.Fatalf("unable to create lock in backend: %v", err)
     }
     debug.Log("create lock %p (exclusive %v)", lock, exclusive)

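The one-line change above replaces a bare error return with errors.Fatalf, so the user sees a message with context ("unable to create lock in backend: ...") instead of a raw backend error. Below is a small, self-contained sketch of the same pattern; the fatalError type and fatalf helper are illustrative stand-ins, not restic's actual errors package:

package main

import "fmt"

// fatalError marks an error that the CLI should print verbatim and exit on,
// mimicking the role played by errors.Fatalf in the diff above.
type fatalError struct{ msg string }

func (e fatalError) Error() string { return e.msg }

// fatalf wraps an underlying error with context, like
// errors.Fatalf("unable to create lock in backend: %v", err).
func fatalf(format string, args ...interface{}) error {
    return fatalError{msg: fmt.Sprintf(format, args...)}
}

func lockRepository() error {
    backendErr := fmt.Errorf("connection reset by peer")
    return fatalf("unable to create lock in backend: %v", backendErr)
}

func main() {
    err := lockRepository()
    if fe, ok := err.(fatalError); ok {
        // The caller can now recognize fatal errors and print them cleanly.
        fmt.Println("Fatal:", fe.Error())
    }
}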
@@ -10,6 +10,7 @@ import (
 type Table struct {
     Header string
     Rows   [][]interface{}
     Footer string

     RowFormat string
 }
@@ -21,13 +22,19 @@ func NewTable() Table {
     }
 }

+func (t Table) printSeparationLine(w io.Writer) error {
+    _, err := fmt.Fprintln(w, strings.Repeat("-", 70))
+    return err
+}
+
+// Write prints the table to w.
 func (t Table) Write(w io.Writer) error {
     _, err := fmt.Fprintln(w, t.Header)
     if err != nil {
         return err
     }
-    _, err = fmt.Fprintln(w, strings.Repeat("-", 70))

+    err = t.printSeparationLine(w)
     if err != nil {
         return err
     }
@@ -39,6 +46,16 @@ func (t Table) Write(w io.Writer) error {
         }
     }

+    err = t.printSeparationLine(w)
+    if err != nil {
+        return err
+    }
+
     _, err = fmt.Fprintln(w, t.Footer)
     if err != nil {
         return err
     }

     return nil
 }
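For context, here is a minimal, runnable sketch of how the Table shown in this diff is used. The field names and printSeparationLine match the change above, while the row-printing loop and the sample data are assumptions made purely for illustration:

package main

import (
    "fmt"
    "io"
    "os"
    "strings"
)

// Table mirrors the struct from the diff above: a header, rows, a footer
// and a printf-style row format.
type Table struct {
    Header    string
    Rows      [][]interface{}
    Footer    string
    RowFormat string
}

// printSeparationLine is the helper factored out in the change above.
func (t Table) printSeparationLine(w io.Writer) error {
    _, err := fmt.Fprintln(w, strings.Repeat("-", 70))
    return err
}

// Write prints header, separation line, rows, a second separation line and
// the footer, which is the layout the diff introduces.
func (t Table) Write(w io.Writer) error {
    if _, err := fmt.Fprintln(w, t.Header); err != nil {
        return err
    }
    if err := t.printSeparationLine(w); err != nil {
        return err
    }
    for _, row := range t.Rows {
        if _, err := fmt.Fprintf(w, t.RowFormat+"\n", row...); err != nil {
            return err
        }
    }
    if err := t.printSeparationLine(w); err != nil {
        return err
    }
    _, err := fmt.Fprintln(w, t.Footer)
    return err
}

func main() {
    t := Table{
        Header:    "ID        Date                 Host",
        RowFormat: "%-8s  %-19s  %s",
        Footer:    "2 snapshots",
    }
    t.Rows = append(t.Rows, []interface{}{"40dc1520", "2015-05-08 21:38:30", "kasimir"})
    t.Rows = append(t.Rows, []interface{}{"79766175", "2015-05-08 21:40:19", "kasimir"})

    if err := t.Write(os.Stdout); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}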
16
doc/010_introduction.rst
Normal file
@@ -0,0 +1,16 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
############
|
||||
Introduction
|
||||
############
|
||||
|
149
doc/020_installation.rst
Normal file
@@ -0,0 +1,149 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
############
|
||||
Installation
|
||||
############
|
||||
|
||||
Packages
|
||||
********
|
||||
|
||||
Mac OS X
|
||||
========
|
||||
|
||||
If you are using Mac OS X, you can install restic using the
|
||||
`homebrew <http://brew.sh/>`__ package manager:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ brew install restic
|
||||
|
||||
Arch Linux
|
||||
==========
|
||||
|
||||
On `Arch Linux <https://www.archlinux.org/>`__, there is a package called ``restic-git``
|
||||
which can be installed from AUR, e.g. with ``pacaur``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pacaur -S restic-git
|
||||
|
||||
Nix & NixOS
|
||||
===========
|
||||
|
||||
If you are using `Nix <https://nixos.org/nix/>`__ or `NixOS <https://nixos.org/>`__
|
||||
there is a package available named ``restic``.
|
||||
It can be installed using ``nix-env``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ nix-env --install restic
|
||||
|
||||
Debian
|
||||
======
|
||||
|
||||
On Debian, there's a package called ``restic`` which can be
|
||||
installed from the official repos, e.g. with ``apt-get``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ apt-get install restic
|
||||
|
||||
|
||||
.. warning:: Please be aware that, at the time of writing, Debian *stable*
|
||||
has ``restic`` version 0.3.3 which is very old. The *testing* and *unstable*
|
||||
branches have recent versions of ``restic``.
|
||||
|
||||
Pre-compiled Binary
|
||||
*******************
|
||||
|
||||
You can download the latest pre-compiled binary from the `restic release
|
||||
page <https://github.com/restic/restic/releases/latest>`__.
|
||||
|
||||
Docker Container
|
||||
****************
|
||||
|
||||
.. note::
|
||||
| A docker container is available as a contribution (Thank you!).
|
||||
| You can find it at https://github.com/Lobaro/restic-backup-docker
|
||||
|
||||
From Source
|
||||
***********
|
||||
|
||||
restic is written in the Go programming language and you need at least
|
||||
Go version 1.8. Building restic may also work with older versions of Go,
|
||||
but that's not supported. See the `Getting
|
||||
started <https://golang.org/doc/install>`__ guide of the Go project for
|
||||
instructions on how to install Go.
|
||||
|
||||
In order to build restic from source, execute the following steps:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git clone https://github.com/restic/restic
|
||||
[...]
|
||||
|
||||
$ cd restic
|
||||
|
||||
$ go run build.go
|
||||
|
||||
You can easily cross-compile restic for all supported platforms; just
supply the target OS and platform via the command-line options like this
(for Windows and FreeBSD respectively):
|
||||
|
||||
::
|
||||
|
||||
$ go run build.go --goos windows --goarch amd64
|
||||
|
||||
$ go run build.go --goos freebsd --goarch 386
|
||||
|
||||
The resulting binary is statically linked and does not require any
|
||||
libraries.
|
||||
|
||||
At the moment, the only tested compiler for restic is the official Go
|
||||
compiler. Building restic with gccgo may work, but is not supported.
|
||||
|
||||
Autocompletion
|
||||
**************
|
||||
|
||||
Restic can write out a bash compatible autocompletion script:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./restic autocomplete --help
|
||||
The "autocomplete" command generates a shell autocompletion script.
|
||||
|
||||
NOTE: The current version supports Bash only.
|
||||
This should work for *nix systems with Bash installed.
|
||||
|
||||
By default, the file is written directly to ``/etc/bash_completion.d/``
|
||||
for convenience, and the command may need superuser rights, e.g.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo restic autocomplete
|
||||
|
||||
Usage:
|
||||
restic autocomplete [flags]
|
||||
|
||||
Flags:
|
||||
--completionfile string autocompletion file (default "/etc/bash_completion.d/restic.sh")
|
||||
|
||||
Global Flags:
|
||||
--json set output mode to JSON for commands that support it
|
||||
--no-lock do not lock the repo, this allows some operations on read-only repos
|
||||
-o, --option key=value set extended option (key=value, can be specified multiple times)
|
||||
-p, --password-file string read the repository password from a file
|
||||
-q, --quiet do not output comprehensive progress report
|
||||
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
|
||||
|
||||
|
402
doc/030_preparing_a_new_repo.rst
Normal file
@@ -0,0 +1,402 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
##########################
|
||||
Preparing a new repository
|
||||
##########################
|
||||
|
||||
The place where your backups will be saved at is called a "repository".
|
||||
This chapter explains how to create ("init") such a repository.
|
||||
|
||||
Local
|
||||
*****
|
||||
|
||||
In order to create a repository at ``/tmp/backup``, run the following
|
||||
command and enter the same password twice:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic init --repo /tmp/backup
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend 085b3c76b9 at /tmp/backup
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
.. warning::
|
||||
|
||||
Remembering your password is important! If you lose it, you won't be
|
||||
able to access data stored in the repository.
|
||||
|
||||
For automated backups, restic accepts the repository location in the
|
||||
environment variable ``RESTIC_REPOSITORY``. The password can be read
|
||||
from a file (via the option ``--password-file`` or the environment variable
|
||||
``RESTIC_PASSWORD_FILE``) or the environment variable ``RESTIC_PASSWORD``.
|
||||
|
||||
SFTP
|
||||
****
|
||||
|
||||
In order to back up data via SFTP, you must first set up a server with
SSH and let it know your public key. Passwordless login is really
important, since restic fails to connect to the repository if the server
prompts for credentials.
|
||||
|
||||
Once the server is configured, the setup of the SFTP repository can
|
||||
simply be achieved by changing the URL scheme in the ``init`` command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r sftp:user@host:/tmp/backup init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend f1c6108821 at sftp:user@host:/tmp/backup
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
You can also specify a relative directory (read: no slash (``/``) character at the
beginning); in this case the directory is relative to the remote
user's home directory.
|
||||
|
||||
.. note:: Please be aware that sftp servers do not expand the tilde character
|
||||
(``~``) normally used as an alias for a user's home directory. If you
|
||||
want to specify a path relative to the user's home directory, pass a
|
||||
relative path to the sftp backend.
|
||||
|
||||
The backend config string does not allow specifying a port. If you need
|
||||
to contact an sftp server on a different port, you can create an entry
|
||||
in the ``ssh`` file, usually located in your user's home directory at
|
||||
``~/.ssh/config`` or in ``/etc/ssh/ssh_config``:
|
||||
|
||||
::
|
||||
|
||||
Host foo
|
||||
User bar
|
||||
Port 2222
|
||||
|
||||
Then use the specified host name ``foo`` normally (you don't need to
|
||||
specify the user name in this case):
|
||||
|
||||
::
|
||||
|
||||
$ restic -r sftp:foo:/tmp/backup init
|
||||
|
||||
You can also add an entry with a special host name which does not exist,
|
||||
just for use with restic, and use the ``Hostname`` option to set the
|
||||
real host name:
|
||||
|
||||
::
|
||||
|
||||
Host restic-backup-host
|
||||
Hostname foo
|
||||
User bar
|
||||
Port 2222
|
||||
|
||||
Then use it in the backend specification:
|
||||
|
||||
::
|
||||
|
||||
$ restic -r sftp:restic-backup-host:/tmp/backup init
|
||||
|
||||
Last, if you'd like to use an entirely different program to create the
|
||||
SFTP connection, you can specify the command to be run with the option
|
||||
``-o sftp.command="foobar"``.
|
||||
|
||||
|
||||
REST Server
|
||||
***********
|
||||
|
||||
In order to back up data to the remote server via the HTTP or HTTPS protocol,
|
||||
you must first set up a remote `REST
|
||||
server <https://github.com/restic/rest-server>`__ instance. Once the
|
||||
server is configured, accessing it is achieved by changing the URL
|
||||
scheme like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r rest:http://host:8000/
|
||||
|
||||
Depending on your REST server setup, you can use the HTTPS protocol,
password protection, or multiple repositories, or any combination of
those features, as you see fit. The TCP/IP port is also configurable. Here
are some more examples:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r rest:https://host:8000/
|
||||
$ restic -r rest:https://user:pass@host:8000/
|
||||
$ restic -r rest:https://user:pass@host:8000/my_backup_repo/
|
||||
|
||||
If you use TLS, restic will use the system's CA certificates to verify the
|
||||
server certificate. When the verification fails, restic refuses to proceed and
|
||||
exits with an error. If you have your own self-signed certificate, or if a custom
CA certificate should be used for verification, you can pass restic the
certificate file name via the ``--cacert`` option.
|
||||
|
||||
REST server uses exactly the same directory structure as local backend,
|
||||
so you should be able to access it both locally and via HTTP, even
|
||||
simultaneously.
|
||||
|
||||
Amazon S3
|
||||
*********
|
||||
|
||||
Restic can back up data to any Amazon S3 bucket. However, in this case,
changing the URL scheme is not enough, since Amazon uses special security
credentials to sign HTTP requests. As a consequence, you must first set up
the following environment variables with the credentials you obtained
while creating the bucket.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||
$ export AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||
|
||||
You can then easily initialize a repository that uses your Amazon S3 as
|
||||
a backend, if the bucket does not exist yet it will be created in the
|
||||
default location:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r s3:s3.amazonaws.com/bucket_name init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend eefee03bbd at s3:s3.amazonaws.com/bucket_name
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
It is not possible at the moment to have restic create a new bucket in a
|
||||
different location, so you need to create it using a different program.
|
||||
Afterwards, the S3 server (``s3.amazonaws.com``) will redirect restic to
|
||||
the correct endpoint.
|
||||
|
||||
Until version 0.8.0, restic used a default prefix of ``restic``, so the files
|
||||
in the bucket were placed in a directory named ``restic``. If you want to
|
||||
access a repository created with an older version of restic, specify the path
|
||||
after the bucket name like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r s3:s3.amazonaws.com/bucket_name/restic [...]
|
||||
|
||||
For an S3-compatible server that is not Amazon (like Minio, see below),
|
||||
or is only available via HTTP, you can specify the URL to the server
|
||||
like this: ``s3:http://server:port/bucket_name``.
|
||||
|
||||
Minio Server
|
||||
************
|
||||
|
||||
`Minio <https://www.minio.io>`__ is an open-source object storage server,
written in Go and compatible with the AWS S3 API.
|
||||
|
||||
- Download and Install `Minio
|
||||
Server <https://minio.io/downloads/#minio-server>`__.
|
||||
- You can also refer to https://docs.minio.io for step by step guidance
|
||||
on installation and getting started on Minio Client and Minio Server.
|
||||
|
||||
You must first set up the following environment variables with the
credentials of your running Minio Server.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export AWS_ACCESS_KEY_ID=<YOUR-MINIO-ACCESS-KEY-ID>
|
||||
$ export AWS_SECRET_ACCESS_KEY= <YOUR-MINIO-SECRET-ACCESS-KEY>
|
||||
|
||||
Now you can easily initialize restic to use Minio server as backend with
|
||||
this command.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./restic -r s3:http://localhost:9000/restic init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend 6ad29560f5 at s3:http://localhost:9000/restic1
|
||||
Please note that knowledge of your password is required to access
|
||||
the repository. Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
OpenStack Swift
|
||||
***************
|
||||
|
||||
Restic can back up data to an OpenStack Swift container. Because Swift supports
various authentication methods, credentials are passed through environment
variables. In order to help integration with existing OpenStack installations,
the naming convention of those variables follows the official Python Swift client:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# For keystone v1 authentication
|
||||
$ export ST_AUTH=<MY_AUTH_URL>
|
||||
$ export ST_USER=<MY_USER_NAME>
|
||||
$ export ST_KEY=<MY_USER_PASSWORD>
|
||||
|
||||
# For keystone v2 authentication (some variables are optional)
|
||||
$ export OS_AUTH_URL=<MY_AUTH_URL>
|
||||
$ export OS_REGION_NAME=<MY_REGION_NAME>
|
||||
$ export OS_USERNAME=<MY_USERNAME>
|
||||
$ export OS_PASSWORD=<MY_PASSWORD>
|
||||
$ export OS_TENANT_ID=<MY_TENANT_ID>
|
||||
$ export OS_TENANT_NAME=<MY_TENANT_NAME>
|
||||
|
||||
# For keystone v3 authentication (some variables are optional)
|
||||
$ export OS_AUTH_URL=<MY_AUTH_URL>
|
||||
$ export OS_REGION_NAME=<MY_REGION_NAME>
|
||||
$ export OS_USERNAME=<MY_USERNAME>
|
||||
$ export OS_PASSWORD=<MY_PASSWORD>
|
||||
$ export OS_USER_DOMAIN_NAME=<MY_DOMAIN_NAME>
|
||||
$ export OS_PROJECT_NAME=<MY_PROJECT_NAME>
|
||||
$ export OS_PROJECT_DOMAIN_NAME=<MY_PROJECT_DOMAIN_NAME>
|
||||
|
||||
# For authentication based on tokens
|
||||
$ export OS_STORAGE_URL=<MY_STORAGE_URL>
|
||||
$ export OS_AUTH_TOKEN=<MY_AUTH_TOKEN>
|
||||
|
||||
|
||||
Restic should be compatible with `OpenStack RC file
|
||||
<https://docs.openstack.org/user-guide/common/cli-set-environment-variables-using-openstack-rc.html>`__
|
||||
in most cases.
|
||||
|
||||
Once environment variables are set up, a new repository can be created. The
|
||||
name of swift container and optional path can be specified. If
|
||||
the container does not exist, it will be created automatically:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r swift:container_name:/path init # path is optional
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend eefee03bbd at swift:container_name:/path
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
The policy of the new container created by restic can be changed using this environment variable:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export SWIFT_DEFAULT_CONTAINER_POLICY=<MY_CONTAINER_POLICY>
|
||||
|
||||
|
||||
Backblaze B2
|
||||
************
|
||||
|
||||
Restic can back up data to any Backblaze B2 bucket. You need to first set up the
following environment variables with the credentials you obtained when signing
in to your B2 account:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export B2_ACCOUNT_ID=<MY_ACCOUNT_ID>
|
||||
$ export B2_ACCOUNT_KEY=<MY_SECRET_ACCOUNT_KEY>
|
||||
|
||||
You can then easily initialize a repository stored at Backblaze B2. If the
|
||||
bucket does not exist yet, it will be created:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r b2:bucketname:path/to/repo init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
created restic backend eefee03bbd at b2:bucketname:path/to/repo
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
The number of concurrent connections to the B2 service can be set with the
``-o b2.connections=10`` option. By default, at most five parallel connections are
established.
|
||||
|
||||
Microsoft Azure Blob Storage
|
||||
****************************
|
||||
|
||||
You can also store backups on Microsoft Azure Blob Storage. Export the Azure
|
||||
account name and key as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export AZURE_ACCOUNT_NAME=<ACCOUNT_NAME>
|
||||
$ export AZURE_ACCOUNT_KEY=<SECRET_KEY>
|
||||
|
||||
Afterwards you can initialize a repository in a container called `foo` in the
|
||||
root path like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r azure:foo:/ init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
|
||||
created restic backend a934bac191 at azure:foo:/
|
||||
[...]
|
||||
|
||||
The number of concurrent connections to the Azure Blob Storage service can be set
with the ``-o azure.connections=10`` option. By default, at most five parallel
connections are established.
|
||||
|
||||
Google Cloud Storage
|
||||
********************
|
||||
|
||||
Restic supports Google Cloud Storage as a backend.
|
||||
|
||||
Restic connects to Google Cloud Storage via a `service account`_.
|
||||
|
||||
For normal restic operation, the service account must have the
|
||||
``storage.objects.{create,delete,get,list}`` permissions for the bucket. These
|
||||
are included in the "Storage Object Admin" role.
|
||||
``restic init`` can create the repository bucket. Doing so requires the
|
||||
``storage.buckets.create`` permission ("Storage Admin" role). If the bucket
|
||||
already exists, that permission is unnecessary.
|
||||
|
||||
To use the Google Cloud Storage backend, first `create a service account key`_
|
||||
and download the JSON credentials file.
|
||||
Second, find the Google Project ID that you can see in the Google Cloud
|
||||
Platform console at the "Storage/Settings" menu. Export the path to the JSON
|
||||
key file and the project ID as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export GOOGLE_PROJECT_ID=123123123123
|
||||
$ export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.config/gs-secret-restic-key.json
|
||||
|
||||
Then you can use the ``gs:`` backend type to create a new repository in the
|
||||
bucket `foo` at the root path:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r gs:foo:/ init
|
||||
enter password for new backend:
|
||||
enter password again:
|
||||
|
||||
created restic backend bde47d6254 at gs:foo2/
|
||||
[...]
|
||||
|
||||
The number of concurrent connections to the GCS service can be set with the
``-o gs.connections=10`` option. By default, at most five parallel connections are
established.
|
||||
|
||||
.. _service account: https://cloud.google.com/storage/docs/authentication#service_accounts
|
||||
.. _create a service account key: https://cloud.google.com/storage/docs/authentication#generating-a-private-key
|
||||
|
||||
Password prompt on Windows
|
||||
**************************
|
||||
|
||||
At the moment, restic only supports the default Windows console
|
||||
interaction. If you use emulation environments like
|
||||
`MSYS2 <https://msys2.github.io/>`__ or
|
||||
`Cygwin <https://www.cygwin.com/>`__, which use terminals like
|
||||
``Mintty`` or ``rxvt``, you may get a password error.

You can work around this by using a special tool called ``winpty`` (see
`here <https://sourceforge.net/p/msys2/wiki/Porting/>`__ and
`here <https://github.com/rprichard/winpty>`__ for detailed information).
On MSYS2, you can install ``winpty`` as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pacman -S winpty
|
||||
$ winpty restic -r /tmp/backup init
|
||||
|
180
doc/040_backup.rst
Normal file
@@ -0,0 +1,180 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
##########
|
||||
Backing up
|
||||
##########
|
||||
|
||||
Now we're ready to backup some data. The contents of a directory at a
|
||||
specific point in time is called a "snapshot" in restic. Run the
|
||||
following command and enter the repository password you chose above
|
||||
again:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup ~/work
|
||||
enter password for repository:
|
||||
scan [/home/user/work]
|
||||
scanned 764 directories, 1816 files in 0:00
|
||||
[0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
|
||||
duration: 0:29, 54.47MiB/s
|
||||
snapshot 40dc1520 saved
|
||||
|
||||
As you can see, restic created a backup of the directory and was pretty
|
||||
fast! The specific snapshot just created is identified by a sequence of
|
||||
hexadecimal characters, ``40dc1520`` in this case.
|
||||
|
||||
If you run the command again, restic will create another snapshot of
|
||||
your data, but this time it's even faster. This is de-duplication at
|
||||
work!
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup ~/work
|
||||
enter password for repository:
|
||||
using parent snapshot 40dc1520aa6a07b7b3ae561786770a01951245d2367241e71e9485f18ae8228c
|
||||
scan [/home/user/work]
|
||||
scanned 764 directories, 1816 files in 0:00
|
||||
[0:00] 100.00% 0B/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
|
||||
duration: 0:00, 6572.38MiB/s
|
||||
snapshot 79766175 saved
|
||||
|
||||
You can even backup individual files in the same repository.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup ~/work.txt
|
||||
scan [/home/user/work.txt]
|
||||
scanned 0 directories, 1 files in 0:00
|
||||
[0:00] 100.00% 0B/s 220B / 220B 1 / 1 items 0 errors ETA 0:00
|
||||
duration: 0:00, 0.03MiB/s
|
||||
snapshot 31f7bd63 saved
|
||||
|
||||
In fact, several hosts may use the same repository to back up directories
and files, leading to even greater de-duplication.
|
||||
|
||||
Please be aware that when you back up different directories (or the
directories to be saved have a variable name component like a
time/date), restic always needs to read all files and only afterwards
can compute which parts of the files need to be saved. When you back up
the same directory again (maybe with new or changed files), restic will
find the old snapshot in the repo and by default only reads those files
that are new or have been modified since the last snapshot. This is
decided based on the modification date of the file in the file system.
|
||||
|
||||
Now is a good time to run ``restic check`` to verify that all data
|
||||
is properly stored in the repository. You should run this command regularly
|
||||
to make sure the internal structure of the repository is free of errors.
|
||||
|
||||
You can exclude folders and files by specifying exclude-patterns. Either
|
||||
specify them with multiple ``--exclude``'s or one ``--exclude-file``
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ cat exclude
|
||||
# exclude go-files
|
||||
*.go
|
||||
# exclude foo/x/y/z/bar foo/x/bar foo/bar
|
||||
foo/**/bar
|
||||
$ restic -r /tmp/backup backup ~/work --exclude=*.c --exclude-file=exclude
|
||||
|
||||
Patterns use `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__ internally,
|
||||
see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for syntax.
|
||||
Additionally ``**`` excludes arbitrary subdirectories.
|
||||
Environment-variables in exclude-files are expanded with
|
||||
`os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__.
|
||||
|
||||
By specifying the option ``--one-file-system`` you can instruct restic
to only back up files from the file systems that the initially specified files
or directories reside on. For example, calling restic like this won't
back up ``/sys`` or ``/dev`` on a Linux system:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup --one-file-system /
|
||||
|
||||
By using the ``--files-from`` option you can read the files you want to
back up from a file. This is especially useful if a lot of files have to
be backed up that are not in the same folder or are maybe pre-filtered
by other software.
|
||||
|
||||
For example, maybe you want to back up files that have a certain filename
in them:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ find /tmp/somefiles | grep 'PATTERN' > /tmp/files_to_backup
|
||||
|
||||
You can then use restic to backup the filtered files:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup
|
||||
|
||||
Incidentally you can also combine ``--files-from`` with the normal files
|
||||
args:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup /tmp/some_additional_file
|
||||
|
||||
Backing up special items
|
||||
************************
|
||||
|
||||
**Symlinks** are archived as symlinks, ``restic`` does not follow them.
|
||||
When you restore, you get the same symlink again, with the same link target
|
||||
and the same timestamps.
|
||||
|
||||
If there is a **bind-mount** below a directory that is to be saved, restic descends into it.
|
||||
|
||||
**Device files** are saved and restored as device files. This means that e.g. ``/dev/sda`` is
|
||||
archived as a block device file and restored as such. This also means that the content of the
|
||||
corresponding disk is not read, at least not from the device file.
|
||||
|
||||
|
||||
Reading data from stdin
|
||||
***********************
|
||||
|
||||
Sometimes it can be nice to directly save the output of a program, e.g.
|
||||
``mysqldump`` so that the SQL can later be restored. Restic supports
|
||||
this mode of operation, just supply the option ``--stdin`` to the
|
||||
``backup`` command like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mysqldump [...] | restic -r /tmp/backup backup --stdin
|
||||
|
||||
This creates a new snapshot of the output of ``mysqldump``. You can then
|
||||
use e.g. the fuse mounting option (see below) to mount the repository
|
||||
and read the file.
|
||||
|
||||
By default, the file name ``stdin`` is used, a different name can be
|
||||
specified with ``--stdin-filename``, e.g. like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql
|
||||
|
||||
Tags for backup
|
||||
***************
|
||||
|
||||
Snapshots can have one or more tags, short strings which add identifying
|
||||
information. Just specify the tags for a snapshot one by one with ``--tag``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup backup --tag projectX --tag foo --tag bar ~/work
|
||||
[...]
|
||||
|
||||
The tags can later be used to keep (or forget) snapshots with the ``forget``
|
||||
command. The command ``tag`` can be used to modify tags on an existing
|
||||
snapshot.
|
89
doc/045_working_with_repos.rst
Normal file
@@ -0,0 +1,89 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
|
||||
#########################
|
||||
Working with repositories
|
||||
#########################
|
||||
|
||||
Listing all snapshots
|
||||
=====================
|
||||
|
||||
Now, you can list all the snapshots stored in the repository:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
|
||||
You can filter the listing by directory path:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots --path="/srv"
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
|
||||
Or filter by host:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots --host luigi
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
|
||||
Combining filters is also possible.
|
||||
|
||||
|
||||
Checking a repo's integrity and consistency
|
||||
===========================================
|
||||
|
||||
Imagine your repository is saved on a server that has a faulty hard
|
||||
drive, or even worse, attackers get privileged access and modify your
|
||||
backup with the intention of making you restore malicious data:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo echo "boom" >> backup/index/d795ffa99a8ab8f8e42cec1f814df4e48b8f49129360fb57613df93739faee97
|
||||
|
||||
In order to detect these things, it is a good idea to regularly use the
|
||||
``check`` command to test whether everything is alright, your precious
|
||||
backup data is consistent and the integrity is unharmed:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup check
|
||||
Load indexes
|
||||
ciphertext verification failed
|
||||
|
||||
Trying to restore a snapshot which has been modified as shown above will
|
||||
yield the same error:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work
|
||||
Load indexes
|
||||
ciphertext verification failed
|
||||
|
82
doc/050_restore.rst
Normal file
@@ -0,0 +1,82 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
#####################
|
||||
Restoring from backup
|
||||
#####################
|
||||
|
||||
Restoring from a snapshot
|
||||
=========================
|
||||
|
||||
Restoring a snapshot is as easy as it sounds, just use the following
|
||||
command to restore the contents of the latest snapshot to
|
||||
``/tmp/restore-work``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work
|
||||
enter password for repository:
|
||||
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
|
||||
|
||||
Use the word ``latest`` to restore the last backup. You can also combine
|
||||
``latest`` with the ``--host`` and ``--path`` filters to choose the last
|
||||
backup for a specific host, path or both.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup restore latest --target /tmp/restore-art --path "/home/art" --host luigi
|
||||
enter password for repository:
|
||||
restoring <Snapshot of [/home/art] at 2015-05-08 21:45:17.884408621 +0200 CEST> to /tmp/restore-art
|
||||
|
||||
Use ``--exclude`` and ``--include`` to restrict the restore to a subset of
|
||||
files in the snapshot. For example, to restore a single file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work --include /work/foo
|
||||
enter password for repository:
|
||||
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
|
||||
|
||||
This will restore the file ``foo`` to ``/tmp/restore-work/work/foo``.
|
||||
|
||||
Restore using mount
|
||||
===================
|
||||
|
||||
Browsing your backup as a regular file system is also very easy. First,
|
||||
create a mount point such as ``/mnt/restic`` and then use the following
|
||||
command to serve the repository with FUSE:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ mkdir /mnt/restic
|
||||
$ restic -r /tmp/backup mount /mnt/restic
|
||||
enter password for repository:
|
||||
Now serving /tmp/backup at /mnt/restic
|
||||
Don't forget to umount after quitting!
|
||||
|
||||
Mounting repositories via FUSE is not possible on Windows and OpenBSD.
|
||||
|
||||
Restic supports storage and preservation of hard links. However, since
|
||||
hard links exist in the scope of a filesystem by definition, restoring
|
||||
hard links from a fuse mount should be done by a program that preserves
|
||||
hard links. A program that does so is ``rsync``, used with the option
|
||||
``--hard-links``.
|
||||
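As a sketch only (assuming the mount from above at ``/mnt/restic`` and that the mounted tree exposes a ``snapshots/latest`` entry), such a restore could look like:

.. code-block:: console

    $ rsync -a --hard-links /mnt/restic/snapshots/latest/ /tmp/restore-work/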
|
||||
Printing files to stdout
|
||||
========================
|
||||
|
||||
Sometimes it's helpful to print files to stdout so that other programs can read
|
||||
the data directly. This can be achieved by using the ``dump`` command, like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup dump latest production.sql | mysql
|
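When the snapshot ID ``latest`` is used, ``dump`` also accepts ``--host``, ``--path`` and ``--tag`` filters to pick the snapshot. A sketch with a hypothetical file path:

.. code-block:: console

    $ restic -r /tmp/backup dump --host luigi latest /srv/production.sql > production.sql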
doc/060_forget.rst (new file, +208 lines)
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
#########################
|
||||
Removing backup snapshots
|
||||
#########################
|
||||
|
||||
All backup space is finite, so restic allows removing old snapshots.
|
||||
This can be done either manually (by specifying a snapshot ID to remove)
|
||||
or by using a policy that describes which snapshots to forget. For all
|
||||
remove operations, two commands need to be called in sequence:
|
||||
``forget`` to remove a snapshot and ``prune`` to actually remove the
|
||||
data that was referenced by the snapshot from the repository. This can
|
||||
be automated with the ``--prune`` option of the ``forget`` command,
|
||||
which runs ``prune`` automatically if snapshots have been removed.
|
||||
|
||||
It is advisable to run ``restic check`` after pruning, to make sure
|
||||
you are alerted, should the internal data structures of the repository
|
||||
be damaged.
|
||||
|
||||
Remove a single snapshot
|
||||
************************
|
||||
|
||||
The command ``snapshots`` can be used to list all snapshots in a
|
||||
repository like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
|
||||
In order to remove the snapshot of ``/home/art``, use the ``forget``
|
||||
command and specify the snapshot ID on the command line:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup forget bdbd3439
|
||||
enter password for repository:
|
||||
removed snapshot d3f01f63
|
||||
|
||||
Afterwards this snapshot is removed:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots
|
||||
enter password for repository:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
|
||||
But the data that was referenced by files in this snapshot is still
|
||||
stored in the repository. To clean up unreferenced data, the ``prune``
|
||||
command must be run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup prune
|
||||
enter password for repository:
|
||||
|
||||
counting files in repo
|
||||
building new index for repo
|
||||
[0:00] 100.00% 22 / 22 files
|
||||
repository contains 22 packs (8512 blobs) with 100.092 MiB bytes
|
||||
processed 8512 blobs: 0 duplicate blobs, 0B duplicate
|
||||
load all snapshots
|
||||
find data that is still in use for 1 snapshots
|
||||
[0:00] 100.00% 1 / 1 snapshots
|
||||
found 8433 of 8512 data blobs still in use
|
||||
will rewrite 3 packs
|
||||
creating new index
|
||||
[0:00] 86.36% 19 / 22 files
|
||||
saved new index as 544a5084
|
||||
done
|
||||
|
||||
Afterwards the repository is smaller.
|
||||
|
||||
You can automate this two-step process by using the ``--prune`` switch
|
||||
to ``forget``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic forget --keep-last 1 --prune
|
||||
snapshots for host mopped, directories /home/user/work:
|
||||
|
||||
keep 1 snapshots:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
4bba301e 2017-02-21 10:49:18 mopped /home/user/work
|
||||
|
||||
remove 1 snapshots:
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
8c02b94b 2017-02-21 10:48:33 mopped /home/user/work
|
||||
|
||||
1 snapshots have been removed, running prune
|
||||
counting files in repo
|
||||
building new index for repo
|
||||
[0:00] 100.00% 37 / 37 packs
|
||||
repository contains 37 packs (5521 blobs) with 151.012 MiB bytes
|
||||
processed 5521 blobs: 0 duplicate blobs, 0B duplicate
|
||||
load all snapshots
|
||||
find data that is still in use for 1 snapshots
|
||||
[0:00] 100.00% 1 / 1 snapshots
|
||||
found 5323 of 5521 data blobs still in use, removing 198 blobs
|
||||
will delete 0 packs and rewrite 27 packs, this frees 22.106 MiB
|
||||
creating new index
|
||||
[0:00] 100.00% 30 / 30 packs
|
||||
saved new index as b49f3e68
|
||||
done
|
||||
|
||||
Removing snapshots according to a policy
|
||||
****************************************
|
||||
|
||||
Removing snapshots manually is tedious and error-prone, therefore restic
|
||||
allows specifying which snapshots should be removed automatically
|
||||
according to a policy. You can specify how many hourly, daily, weekly,
|
||||
monthly and yearly snapshots to keep; any other snapshots are removed.
|
||||
The most important command-line parameter here is ``--dry-run`` which
|
||||
instructs restic to not remove anything but print which snapshots would
|
||||
be removed.
|
||||
|
||||
When ``forget`` is run with a policy, restic loads the list of all
|
||||
snapshots, then groups these by host name and list of directories. The grouping
|
||||
options can be set with ``--group-by``; to only group snapshots by paths and
|
||||
tags, use ``--group-by paths,tags``. The policy is then applied to each group of
|
||||
snapshots separately. This is a safety feature.
|
||||
|
||||
The ``forget`` command accepts the following parameters (a combined example is shown after the list):
|
||||
- ``--keep-last n`` never delete the ``n`` last (most recent) snapshots
|
||||
- ``--keep-hourly n`` for the last ``n`` hours in which a snapshot was
|
||||
made, keep only the last snapshot for each hour.
|
||||
- ``--keep-daily n`` for the last ``n`` days which have one or more
|
||||
snapshots, only keep the last one for that day.
|
||||
- ``--keep-weekly n`` for the last ``n`` weeks which have one or more
|
||||
snapshots, only keep the last one for that week.
|
||||
- ``--keep-monthly n`` for the last ``n`` months which have one or more
|
||||
snapshots, only keep the last one for that month.
|
||||
- ``--keep-yearly n`` for the last ``n`` years which have one or more
|
||||
snapshots, only keep the last one for that year.
|
||||
- ``--keep-tag`` keep all snapshots which have all tags specified by
|
||||
this option (can be specified multiple times).
|
||||
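For example, a sketch that combines several of these options with ``--dry-run`` to preview the result before deleting anything:

.. code-block:: console

    $ restic -r /tmp/backup forget --keep-daily 7 --keep-weekly 4 --keep-monthly 6 --dry-run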
|
||||
Additionally, you can restrict removing snapshots to those which have a
|
||||
particular hostname with the ``--hostname`` parameter, or tags with the
|
||||
``--tag`` option. When multiple tags are specified, only the snapshots
|
||||
which have all the tags are considered. For example, the following command
|
||||
removes all but the latest snapshot of all snapshots that have the tag ``foo``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic forget --tag foo --keep-last 1
|
||||
|
||||
This command removes all but the last snapshot of all snapshots that have
|
||||
either the ``foo`` or ``bar`` tag set:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic forget --tag foo --tag bar --keep-last 1
|
||||
|
||||
To only keep the last snapshot of all snapshots with both the tag ``foo`` and
|
||||
``bar`` set use:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic forget --tag foo,bar --keep-last 1
|
||||
|
||||
All the ``--keep-*`` options above only count
|
||||
hours/days/weeks/months/years which have a snapshot, so those without a
|
||||
snapshot are ignored.
|
||||
|
||||
All snapshots are evaluated and counted against all matching ``keep-*`` counts. A
|
||||
single snapshot on 2017-09-30 (Sun) will count as a daily, weekly and monthly.
|
||||
|
||||
Let's explain this with an example: Suppose you have only made a backup
|
||||
on each Sunday for 12 weeks. Then ``forget --keep-daily 4`` will keep
|
||||
the last four snapshots for the last four Sundays, but remove the rest.
|
||||
Only counting the days which have a backup and ignoring the ones without
|
||||
is a safety feature: it prevents restic from removing many snapshots
|
||||
when no new ones are created. If it was implemented otherwise, running
|
||||
``forget --keep-daily 4`` on a Friday would remove all snapshots!
|
||||
|
||||
Another example: Suppose you make daily backups for 100 years. Then
|
||||
``forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 75``
|
||||
will keep the most recent 7 daily snapshots, then 4 (remember, 7 dailies
|
||||
already include a week!) last-day-of-the-weeks and 11 or 12
|
||||
last-day-of-the-months (11 or 12, depending on whether the 5 weeklies cross a month boundary).
|
||||
And finally 75 last-day-of-the-year snapshots. All other snapshots are
|
||||
removed.
|
||||
|
doc/070_encryption.rst (new file, +51 lines)
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
##########
|
||||
Encryption
|
||||
##########
|
||||
|
||||
|
||||
*"The design might not be perfect, but it’s good. Encryption is a first-class feature,
|
||||
the implementation looks sane and I guess the deduplication trade-off is worth it. So… I’m going to use restic for
|
||||
my personal backups.*" `Filippo Valsorda`_
|
||||
|
||||
.. _Filippo Valsorda: https://blog.filippo.io/restic-cryptography/
|
||||
|
||||
**********************
|
||||
Manage repository keys
|
||||
**********************
|
||||
|
||||
The ``key`` command allows you to set multiple access keys or passwords
|
||||
per repository. In fact, you can use the ``list``, ``add``, ``remove``
|
||||
and ``passwd`` sub-commands to manage these keys very precisely:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup key list
|
||||
enter password for repository:
|
||||
ID User Host Created
|
||||
----------------------------------------------------------------------
|
||||
*eb78040b username kasimir 2015-08-12 13:29:57
|
||||
|
||||
$ restic -r /tmp/backup key add
|
||||
enter password for repository:
|
||||
enter password for new key:
|
||||
enter password again:
|
||||
saved new key as <Key of username@kasimir, created on 2015-08-12 13:35:05.316831933 +0200 CEST>
|
||||
|
||||
$ restic -r backup key list
|
||||
enter password for repository:
|
||||
ID User Host Created
|
||||
----------------------------------------------------------------------
|
||||
5c657874 username kasimir 2015-08-12 13:35:05
|
||||
*eb78040b username kasimir 2015-08-12 13:29:57
|
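To change the password stored in the current key, the ``passwd`` sub-command works the same way; the prompts below mirror the ``add`` example and are only a sketch:

.. code-block:: console

    $ restic -r /tmp/backup key passwd
    enter password for repository:
    enter password for new key:
    enter password again: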
@@ -1,15 +1,32 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
########
|
||||
Examples
|
||||
########
|
||||
|
||||
********************************
|
||||
Setting up restic with Amazon S3
|
||||
================================
|
||||
********************************
|
||||
|
||||
Preface
|
||||
-------
|
||||
=======
|
||||
|
||||
This tutorial will show you how to use restic with AWS S3. It will show you how
|
||||
to navigate the AWS web interface, create an S3 bucket, create a user with
|
||||
access to only this bucket, and finally how to connect restic to this bucket.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
=============
|
||||
|
||||
You should already have a ``restic`` binary available on your system that you can
|
||||
run. Furthermore, you should also have an account with
|
||||
@@ -19,7 +36,7 @@ details for billing purposes, even if you use their
|
||||
|
||||
|
||||
Logging into AWS
|
||||
----------------
|
||||
================
|
||||
|
||||
Point your browser to
|
||||
https://console.aws.amazon.com
|
||||
@@ -39,7 +56,7 @@ Access Management (IAM) are relevant.
|
||||
|
||||
|
||||
Creating the bucket
|
||||
-------------------
|
||||
===================
|
||||
|
||||
First, a bucket to store your backups in must be created. Using the "Services"
|
||||
menu, navigate to S3. In case you already have some S3 buckets, you will see a
|
||||
@@ -71,7 +88,7 @@ buckets:
|
||||
:alt: List With New Bucket
|
||||
|
||||
Creating a user
|
||||
---------------
|
||||
===============
|
||||
|
||||
Use the "Services" menu of the AWS web interface to navigate to IAM. This will
|
||||
bring you to the IAM homepage. To create a new user, click on the "Users" menu
|
||||
@@ -177,7 +194,7 @@ browser now.
|
||||
|
||||
|
||||
Initializing the restic repository
|
||||
----------------------------------
|
||||
==================================
|
||||
|
||||
Open a terminal and make sure you have the ``restic`` binary ready. First, choose
|
||||
a password to encrypt your backups with. In this tutorial, ``apg`` is used for
|
@@ -1,8 +1,75 @@
|
||||
Development
|
||||
===========
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
#############
|
||||
Participating
|
||||
#############
|
||||
|
||||
*********
|
||||
Debugging
|
||||
*********
|
||||
|
||||
The program can be built with debug support like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ go run build.go -tags debug
|
||||
|
||||
Afterwards, extensive debug messages are written to the file in
|
||||
environment variable ``DEBUG_LOG``, e.g.:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DEBUG_LOG=/tmp/restic-debug.log restic backup ~/work
|
||||
|
||||
If you suspect that there is a bug, you can have a look at the debug
|
||||
log. Please be aware that the debug log might contain sensitive
|
||||
information such as file and directory names.
|
||||
|
||||
The debug log will always contain all log messages restic generates. You
|
||||
can also instruct restic to print some or all debug messages to stderr.
|
||||
These can also be limited to e.g. a list of source files or a list of
|
||||
patterns for function names. The patterns are globbing patterns (see the
|
||||
documentation for `path.Glob <https://golang.org/pkg/path/#Glob>`__), multiple
|
||||
patterns are separated by commas. Patterns are case sensitive.
|
||||
|
||||
Printing all log messages to the console can be achieved by setting the
|
||||
file filter to ``*``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DEBUG_FILES=* restic check
|
||||
|
||||
If you want restic to just print all debug log messages from the files
|
||||
``main.go`` and ``lock.go``, set the environment variable
|
||||
``DEBUG_FILES`` like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DEBUG_FILES=main.go,lock.go restic check
|
||||
|
||||
The following command line instructs restic to only print debug
|
||||
statements originating in functions that match the pattern ``*unlock*``
|
||||
(case sensitive):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ DEBUG_FUNCS=*unlock* restic check
|
||||
|
||||
|
||||
************
|
||||
Contributing
|
||||
************
|
||||
|
||||
Contribute
|
||||
----------
|
||||
Contributions are welcome! Please **open an issue first** (or add a
|
||||
comment to an existing issue) if you plan to work on any code or add a
|
||||
new feature. This way, duplicate work is prevented and we can discuss
|
||||
@@ -21,8 +88,10 @@ A few issues have been tagged with the label ``help wanted``, you can
|
||||
start looking at those:
|
||||
https://github.com/restic/restic/labels/help%20wanted
|
||||
|
||||
********
|
||||
Security
|
||||
--------
|
||||
********
|
||||
|
||||
**Important**: If you discover something that you believe to be a
|
||||
possible critical security problem, please do *not* open a GitHub issue
|
||||
but send an email directly to alexander@bumpern.de. If possible, please
|
||||
@@ -36,8 +105,9 @@ encrypt your email using the following PGP key
|
||||
uid Alexander Neumann <alexander@bumpern.de>
|
||||
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
|
||||
|
||||
*************
|
||||
Compatibility
|
||||
-------------
|
||||
*************
|
||||
|
||||
Backward compatibility for backups is important so that our users are
|
||||
always able to restore saved data. Therefore restic follows `Semantic
|
||||
@@ -52,8 +122,9 @@ version; as long as we do not increment the major version, data can be
|
||||
read and restored. We strive to be fully backward compatible to all
|
||||
prior versions.
|
||||
|
||||
**********************
|
||||
Building documentation
|
||||
----------------------
|
||||
**********************
|
||||
|
||||
The restic documentation is built with `Sphinx <http://sphinx-doc.org>`__,
|
||||
therefore building it locally requires a recent Python version and requirements listed in ``doc/requirements.txt``.
|
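A minimal sketch of a local documentation build (assuming the requirements provide ``sphinx-build`` and that HTML output under ``doc/_build/html`` is acceptable):

.. code-block:: console

    $ pip install -r doc/requirements.txt
    $ sphinx-build -b html doc doc/_build/html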
@@ -1,8 +1,25 @@
|
||||
..
|
||||
Normally, there are no heading levels assigned to certain characters as the structure is
|
||||
determined from the succession of headings. However, this convention is used in Python’s
|
||||
Style Guide for documenting which you may follow:
|
||||
|
||||
# with overline, for parts
|
||||
* for chapters
|
||||
= for sections
|
||||
- for subsections
|
||||
^ for subsubsections
|
||||
" for paragraphs
|
||||
|
||||
##########
|
||||
References
|
||||
##########
|
||||
|
||||
******
|
||||
Design
|
||||
======
|
||||
******
|
||||
|
||||
Terminology
|
||||
-----------
|
||||
===========
|
||||
|
||||
This section introduces terminology used in this document.
|
||||
|
||||
@@ -26,7 +43,7 @@ the repository. This ID is required in order to load the file from the
|
||||
repository.
|
||||
|
||||
Repository Format
|
||||
-----------------
|
||||
=================
|
||||
|
||||
All data is stored in a restic repository. A repository is able to store
|
||||
data of several different types, which can later be requested based on
|
||||
@@ -77,7 +94,7 @@ locally. The field ``chunker_polynomial`` contains a parameter that is
|
||||
used for splitting large files into smaller chunks (see below).
|
||||
|
||||
Repository Layout
|
||||
~~~~~~~~~~~~~~~~~
|
||||
-----------------
|
||||
|
||||
The ``local`` and ``sftp`` backends are implemented using files and
|
||||
directories stored in a file system. The directory layout is the same
|
||||
@@ -125,7 +142,7 @@ the option ``-o local.layout=default``, valid values are ``default`` and
|
||||
s3 backend ``s3.layout``.
|
||||
|
||||
S3 Legacy Layout
|
||||
~~~~~~~~~~~~~~~~
|
||||
----------------
|
||||
|
||||
Unfortunately during development the AWS S3 backend uses slightly different
|
||||
paths (directory names use singular instead of plural for ``key``,
|
||||
@@ -154,7 +171,7 @@ The S3 backend understands and accepts both forms, new backends are
|
||||
always created with the default layout for compatibility reasons.
|
||||
|
||||
Pack Format
|
||||
-----------
|
||||
===========
|
||||
|
||||
All files in the repository except Key and Pack files just contain raw
|
||||
data, stored as ``IV || Ciphertext || MAC``. Pack files may contain one
|
||||
@@ -212,7 +229,7 @@ header. Afterwards, the header can be read and parsed, which yields all
|
||||
plaintext hashes, types, offsets and lengths of all included blobs.
|
||||
|
||||
Indexing
|
||||
--------
|
||||
========
|
||||
|
||||
Index files contain information about Data and Tree Blobs and the Packs
|
||||
they are contained in and store this information in the repository. When
|
||||
@@ -268,7 +285,7 @@ on non-disjoint sets of Packs. The number of packs described in a single
|
||||
file is chosen so that the file size is kept below 8 MiB.
|
||||
|
||||
Keys, Encryption and MAC
|
||||
------------------------
|
||||
========================
|
||||
|
||||
All data stored by restic in the repository is encrypted with AES-256 in
|
||||
counter mode and authenticated using Poly1305-AES. For encrypting new
|
||||
@@ -347,7 +364,7 @@ each. This way, the password can be changed without having to re-encrypt
|
||||
all data.
|
||||
|
||||
Snapshots
|
||||
---------
|
||||
=========
|
||||
|
||||
A snapshot represents a directory with all files and sub-directories at
|
||||
a given point in time. For each backup that is made, a new snapshot is
|
||||
@@ -417,7 +434,7 @@ a Pack file , an index is used. If the index is not available, the
|
||||
header of all data Blobs can be read.
|
||||
|
||||
Trees and Data
|
||||
--------------
|
||||
==============
|
||||
|
||||
A snapshot references a tree by the SHA-256 hash of the JSON string
|
||||
representation of its contents. Trees and data are saved in pack files
|
||||
@@ -503,7 +520,7 @@ matches the plaintext hash from the map included in the tree above, so
|
||||
the correct data has been returned.
|
||||
|
||||
Locks
|
||||
-----
|
||||
=====
|
||||
|
||||
The restic repository structure is designed in a way that allows
|
||||
parallel access of multiple instances of restic and even parallel writes.
|
||||
@@ -547,7 +564,7 @@ appeared in the repository. Depending on the type of the other locks and
|
||||
the lock to be created, restic either continues or fails.
|
||||
|
||||
Backups and Deduplication
|
||||
-------------------------
|
||||
=========================
|
||||
|
||||
For creating a backup, restic scans the source directory for all files,
|
||||
sub-directories and other entries. The data from each file is split into
|
||||
@@ -565,7 +582,7 @@ backup. This even works if bytes are inserted or removed at arbitrary
|
||||
positions within the file.
|
||||
|
||||
Threat Model
|
||||
------------
|
||||
============
|
||||
|
||||
The design goals for restic include being able to securely store backups
|
||||
in a location that is not completely trusted, e.g. a shared system where
|
||||
@@ -607,3 +624,140 @@ stored files) which files belong to what snapshot. When only these files
|
||||
are deleted, the particular snapshot vanished and all snapshots
|
||||
depending on data that has been added in the snapshot cannot be restored
|
||||
completely. Restic is not designed to detect this attack.
|
||||
|
||||
Local Cache
|
||||
===========
|
||||
|
||||
In order to speed up certain operations, restic manages a local cache of data.
|
||||
This document describes the data structures for the local cache with version 1.
|
||||
|
||||
Versions
|
||||
--------
|
||||
|
||||
The cache directory is selected according to the `XDG base dir specification
|
||||
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`__.
|
||||
Each repository has its own cache sub-directory, consisting of the repository ID
|
||||
which is chosen at ``init``. All cache directories for different repos are
|
||||
independent of each other.
|
||||
|
||||
The cache dir for a repo contains a file named ``version``, which contains a
|
||||
single ASCII integer line that stands for the current version of the cache. If
|
||||
a lower version number is found the cache is recreated with the current
|
||||
version. If a higher version number is found the cache is ignored and left as
|
||||
is.
|
||||
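On a system following the XDG defaults this means the version can be inspected as follows (the repository ID below is a placeholder):

.. code-block:: console

    $ cat ~/.cache/restic/<repository-id>/version
    1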
|
||||
Snapshots and Indexes
|
||||
---------------------
|
||||
|
||||
Snapshot, Data and Index files are cached in the sub-directories ``snapshots``,
|
||||
``data`` and ``index``, as read from the repository.
|
||||
|
||||
|
||||
************
|
||||
REST Backend
|
||||
************
|
||||
|
||||
Restic can interact with an HTTP backend that respects the following REST
|
||||
API. The following values are valid for ``{type}``: ``data``, ``keys``,
|
||||
``locks``, ``snapshots``, ``index``, ``config``. ``{path}`` is a path to
|
||||
the repository, so that multiple different repositories can be accessed.
|
||||
The default path is ``/``.
|
||||
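On the client side such a backend is addressed with the ``rest:`` scheme; as a sketch (assuming a server listening on ``localhost:8000`` and a repository path of ``/my-repo``):

.. code-block:: console

    $ restic -r rest:http://localhost:8000/my-repo init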
|
||||
POST {path}?create=true
|
||||
=======================
|
||||
|
||||
This request is used to initially create a new repository. The server
|
||||
responds with "200 OK" if the repository structure was created
|
||||
successfully or already exists, otherwise an error is returned.
|
||||
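As a sketch (again assuming a server on ``localhost:8000`` and the repository path ``/my-repo``), this request could be issued with ``curl``:

.. code-block:: console

    $ curl -X POST "http://localhost:8000/my-repo?create=true"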
|
||||
DELETE {path}
|
||||
=============
|
||||
|
||||
Deletes the repository on the server side. The server responds with "200
|
||||
OK" if the repository was successfully removed. If this function is not
|
||||
implemented, the server returns "501 Not Implemented"; if it is
|
||||
denied by the server it returns "403 Forbidden".
|
||||
|
||||
HEAD {path}/config
|
||||
==================
|
||||
|
||||
Returns "200 OK" if the repository has a configuration, an HTTP error
|
||||
otherwise.
|
||||
|
||||
GET {path}/config
|
||||
=================
|
||||
|
||||
Returns the content of the configuration file if the repository has a
|
||||
configuration, an HTTP error otherwise.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/config
|
||||
==================
|
||||
|
||||
Returns "200 OK" if the configuration of the request body has been
|
||||
saved, an HTTP error otherwise.
|
||||
|
||||
GET {path}/{type}/
|
||||
==================
|
||||
|
||||
Returns a JSON array containing the names of all the blobs stored for a
|
||||
given type.
|
||||
|
||||
Response format: JSON
|
||||
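For instance, listing the names of all snapshot files of the hypothetical repository from above; the response is a JSON array of file names:

.. code-block:: console

    $ curl "http://localhost:8000/my-repo/snapshots/"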
|
||||
HEAD {path}/{type}/{name}
|
||||
=========================
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type is stored in
|
||||
the repository, "404 not found" otherwise. If the blob exists, the HTTP
|
||||
header ``Content-Length`` is set to the file size.
|
||||
|
||||
GET {path}/{type}/{name}
|
||||
========================
|
||||
|
||||
Returns the content of the blob with the given name and type if it is
|
||||
stored in the repository, "404 not found" otherwise.
|
||||
|
||||
If the request specifies a partial read with a Range header field, then
|
||||
the status code of the response is 206 instead of 200 and the response
|
||||
only contains the specified range.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/{type}/{name}
|
||||
=========================
|
||||
|
||||
Saves the content of the request body as a blob with the given name and
|
||||
type. Returns "200 OK" on success, an HTTP error otherwise.
|
||||
|
||||
Request format: binary/octet-stream
|
||||
|
||||
DELETE {path}/{type}/{name}
|
||||
===========================
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type has been
|
||||
deleted from the repository, an HTTP error otherwise.
|
||||
|
||||
|
||||
*****
|
||||
Talks
|
||||
*****
|
||||
|
||||
The following talks will be or have been given about restic:
|
||||
|
||||
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
|
||||
Brussels, Belgium
|
||||
- 2016-01-29: `restic - Backups mal
|
||||
richtig <https://media.ccc.de/v/c4.openchaos.2016.01.restic>`__:
|
||||
Public lecture in German at `CCC Cologne
|
||||
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
|
||||
- 2015-08-23: `A Solution to the Backup
|
||||
Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
|
||||
Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
|
||||
- 2015-02-01: `Lightning Talk at FOSDEM
|
||||
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
|
||||
short introduction (with slightly outdated command line)
|
||||
- 2015-01-27: `Talk about restic at CCC
|
||||
Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
|
||||
(in German)
|
@@ -1 +0,0 @@
|
||||
design.rst
|
doc/bash-completion.sh (new file, +1193 lines; diff suppressed because it is too large)
doc/cache.rst (new file, +26 lines)
|
||||
Local Cache
|
||||
===========
|
||||
|
||||
In order to speed up certain operations, restic manages a local cache of data.
|
||||
This document describes the data structures for the local cache with version 1.
|
||||
|
||||
Versions
|
||||
--------
|
||||
|
||||
The cache directory is selected according to the `XDG base dir specification
|
||||
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`__.
|
||||
Each repository has its own cache sub-directory, consisting of the repository ID
|
||||
which is chosen at ``init``. All cache directories for different repos are
|
||||
independent of each other.
|
||||
|
||||
The cache dir for a repo contains a file named ``version``, which contains a
|
||||
single ASCII integer line that stands for the current version of the cache. If
|
||||
a lower version number is found the cache is recreated with the current
|
||||
version. If a higher version number is found the cache is ignored and left as
|
||||
is.
|
||||
|
||||
Snapshots and Indexes
|
||||
---------------------
|
||||
|
||||
Snapshot, Data and Index files are cached in the sub-directories ``snapshots``,
|
||||
``data`` and ``index``, as read from the repository.
|
doc/faq.rst
@@ -32,13 +32,14 @@ How can I specify encryption passwords automatically?
|
||||
|
||||
When you run ``restic backup``, you need to enter the passphrase on
|
||||
the console. This is not very convenient for automated backups, so you
|
||||
can also provide the password through the ``--password-file`` option
|
||||
or ``RESTIC_PASSWORD`` environment. A discussion is in progress over
|
||||
implementing unattended backups happens in :issue:`533`.
|
||||
can also provide the password through the ``--password-file`` option, or one of
|
||||
the environment variables ``RESTIC_PASSWORD`` or ``RESTIC_PASSWORD_FILE``.
|
||||
A discussion about implementing unattended backups is in progress in
|
||||
:issue:`533`.
|
||||
|
||||
.. important:: Be careful how you set the environment; using the env
|
||||
command, a `system()` call or using inline shell
|
||||
scripts (e.g. `RESTIC_PASSWORD=password borg ...`)
|
||||
scripts (e.g. `RESTIC_PASSWORD=password restic ...`)
|
||||
might expose the credentials in the process list
|
||||
directly and they will be readable to all users on a
|
||||
system. Using export in a shell script file should be
|
||||
@@ -49,3 +50,68 @@ implementing unattended backups happens in :issue:`533`.
|
||||
root).
|
||||
|
||||
.. _accessible only to that user: https://security.stackexchange.com/questions/14000/environment-variable-accessibility-in-linux/14009#14009
|
||||
|
||||
How to prioritize restic's IO and CPU time
|
||||
------------------------------------------
|
||||
|
||||
If you'd like to change the **IO priority** of restic, run it in the following way
|
||||
|
||||
::
|
||||
|
||||
$ ionice -c2 -n0 ./restic -r /media/your/backup/ backup /home
|
||||
|
||||
This runs ``restic`` in the so-called *best effort* class (``-c2``),
|
||||
with the highest possible priority (``-n0``).
|
||||
|
||||
Take a look at the `ionice manpage`_ to learn about the other classes.
|
||||
|
||||
.. _ionice manpage: https://linux.die.net/man/1/ionice
|
||||
|
||||
|
||||
To change the **CPU scheduling priority** to a higher-than-standard
|
||||
value, you would run:
|
||||
|
||||
::
|
||||
|
||||
$ nice --10 ./restic -r /media/your/backup/ backup /home
|
||||
|
||||
Again, the `nice manpage`_ has more information.
|
||||
|
||||
.. _nice manpage: https://linux.die.net/man/1/nice
|
||||
|
||||
You can also **combine IO and CPU scheduling priority**:
|
||||
|
||||
::
|
||||
|
||||
$ ionice -c2 nice -n19 ./restic -r /media/gour/backup/ backup /home
|
||||
|
||||
This example puts restic in the IO class 2 (best effort) and tells the CPU
|
||||
scheduling algorithm to give it the least favorable niceness (19).
|
||||
|
||||
The above example makes sure that the system the backup runs on
|
||||
is not slowed down, which is particularly useful for servers.
|
||||
|
||||
Creating new repo on a Synology NAS via sftp fails
|
||||
--------------------------------------------------
|
||||
|
||||
Sometimes creating a new restic repository on a Synology NAS via sftp fails
|
||||
with an error similar to the following:
|
||||
|
||||
::
|
||||
|
||||
$ restic init -r sftp:user@nas:/volume1/restic-repo init
|
||||
create backend at sftp:user@nas:/volume1/restic-repo/ failed:
|
||||
mkdirAll(/volume1/restic-repo/index): unable to create directories: [...]
|
||||
|
||||
This can happen even though you can log into the NAS via SSH and see that the directory structure
|
||||
is there.
|
||||
|
||||
The reason for this behavior is that Synology NAS devices apparently expose a different
|
||||
directory structure via sftp, so the path that needs to be specified is
|
||||
different from the directory structure on the device, and possibly from what is exposed
|
||||
via other protocols.
|
||||
|
||||
The following may work:
|
||||
|
||||
::
|
||||
$ restic init -r sftp:user@nas:/restic-repo init
|
||||
|
@@ -4,12 +4,16 @@ Restic Documentation
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
installation
|
||||
manual
|
||||
010_introduction
|
||||
020_installation
|
||||
030_preparing_a_new_repo
|
||||
040_backup
|
||||
045_working_with_repos
|
||||
050_restore
|
||||
060_forget
|
||||
070_encryption
|
||||
080_examples
|
||||
090_participating
|
||||
100_references
|
||||
faq
|
||||
tutorials
|
||||
development
|
||||
references
|
||||
talks
|
||||
|
||||
.. include:: ../README.rst
|
||||
manual_rest
|
||||
|
@@ -1,68 +0,0 @@
|
||||
Installation
|
||||
============
|
||||
|
||||
Packages
|
||||
--------
|
||||
|
||||
Mac OS X
|
||||
~~~~~~~~~
|
||||
|
||||
If you are using Mac OS X, you can install restic using the
|
||||
`homebrew <http://brew.sh/>`__ package manager:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ brew tap restic/restic
|
||||
$ brew install restic
|
||||
|
||||
archlinux
|
||||
~~~~~~~~~
|
||||
|
||||
On archlinux, there is a package called ``restic-git`` which can be
|
||||
installed from AUR, e.g. with ``pacaur``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pacaur -S restic-git
|
||||
|
||||
Pre-compiled Binary
|
||||
-------------------
|
||||
|
||||
You can download the latest pre-compiled binary from the `restic release
|
||||
page <https://github.com/restic/restic/releases/latest>`__.
|
||||
|
||||
From Source
|
||||
-----------
|
||||
|
||||
restic is written in the Go programming language and you need at least
|
||||
Go version 1.8. Building restic may also work with older versions of Go,
|
||||
but that's not supported. See the `Getting
|
||||
started <https://golang.org/doc/install>`__ guide of the Go project for
|
||||
instructions how to install Go.
|
||||
|
||||
In order to build restic from source, execute the following steps:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ git clone https://github.com/restic/restic
|
||||
[...]
|
||||
|
||||
$ cd restic
|
||||
|
||||
$ go run build.go
|
||||
|
||||
You can easily cross-compile restic for all supported platforms, just
|
||||
supply the target OS and platform via the command-line options like this
|
||||
(for Windows and FreeBSD respectively):
|
||||
|
||||
::
|
||||
|
||||
$ go run build.go --goos windows --goarch amd64
|
||||
|
||||
$ go run build.go --goos freebsd --goarch 386
|
||||
|
||||
The resulting binary is statically linked and does not require any
|
||||
libraries.
|
||||
|
||||
At the moment, the only tested compiler for restic is the official Go
|
||||
compiler. Building restic with gccgo may work, but is not supported.
|
@@ -1,70 +0,0 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
|
||||
.SH NAME
|
||||
.PP
|
||||
restic\-autocomplete \- Generate shell autocompletion script
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
\fBrestic autocomplete [flags]\fP
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
The "autocomplete" command generates a shell autocompletion script.
|
||||
|
||||
.PP
|
||||
NOTE: The current version supports Bash only.
|
||||
This should work for *nix systems with Bash installed.
|
||||
|
||||
.PP
|
||||
By default, the file is written directly to /etc/bash\_completion.d
|
||||
for convenience, and the command may need superuser rights, e.g.:
|
||||
|
||||
.PP
|
||||
$ sudo restic autocomplete
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
.PP
|
||||
\fB\-\-completionfile\fP="/usr/share/bash\-completion/completions/restic"
|
||||
autocompletion file
|
||||
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for autocomplete
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
||||
.PP
|
||||
\fB\-o\fP, \fB\-\-option\fP=[]
|
||||
set extended option (\fB\fCkey=value\fR, can be specified multiple times)
|
||||
|
||||
.PP
|
||||
\fB\-p\fP, \fB\-\-password\-file\fP=""
|
||||
read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE)
|
||||
|
||||
.PP
|
||||
\fB\-q\fP, \fB\-\-quiet\fP[=false]
|
||||
do not output comprehensive progress report
|
||||
|
||||
.PP
|
||||
\fB\-r\fP, \fB\-\-repo\fP=""
|
||||
repository to backup to or restore from (default: $RESTIC\_REPOSITORY)
|
||||
|
||||
|
||||
.SH SEE ALSO
|
||||
.PP
|
||||
\fBrestic(1)\fP
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -50,7 +50,7 @@ given as the arguments.
|
||||
|
||||
.PP
|
||||
\fB\-\-hostname\fP=""
|
||||
set the \fB\fChostname\fR for the snapshot manually
|
||||
set the \fB\fChostname\fR for the snapshot manually. To prevent an expensive rescan use the "parent" flag
|
||||
|
||||
.PP
|
||||
\fB\-x\fP, \fB\-\-one\-file\-system\fP[=false]
|
||||
@@ -78,10 +78,30 @@ given as the arguments.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -25,10 +25,30 @@ The "cat" command is used to print internal objects to stdout.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -18,6 +18,10 @@ restic\-check \- Check the repository for errors
|
||||
The "check" command tests the repository for errors and reports any errors it
|
||||
finds. It can also be used to read all data and therefore simulate a restore.
|
||||
|
||||
.PP
|
||||
By default, the "check" command will always load all data directly from the
|
||||
repository and not use a local cache.
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
.PP
|
||||
@@ -32,12 +36,36 @@ finds. It can also be used to read all data and therefore simulate a restore.
|
||||
\fB\-\-read\-data\fP[=false]
|
||||
read all data blobs
|
||||
|
||||
.PP
|
||||
\fB\-\-with\-cache\fP[=false]
|
||||
use the cache
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,22 +1,26 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
|
||||
.SH NAME
|
||||
.PP
|
||||
restic\-dump \- Dump data structures
|
||||
restic\-dump \- Print a backed\-up file to stdout
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
\fBrestic dump [indexes|snapshots|trees|all|packs] [flags]\fP
|
||||
\fBrestic dump [flags] snapshotID file\fP
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
The "dump" command dumps data structures from the repository as JSON objects. It
|
||||
is used for debugging purposes only.
|
||||
The "dump" command extracts a single file from a snapshot from the repository and
|
||||
prints its contents to stdout.
|
||||
|
||||
.PP
|
||||
The special snapshot "latest" can be used to use the latest snapshot in the
|
||||
repository.
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
@@ -24,12 +28,44 @@ is used for debugging purposes only.
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for dump
|
||||
|
||||
.PP
|
||||
\fB\-H\fP, \fB\-\-host\fP=""
|
||||
only consider snapshots for this host when the snapshot ID is "latest"
|
||||
|
||||
.PP
|
||||
\fB\-\-path\fP=[]
|
||||
only consider snapshots which include this (absolute) \fB\fCpath\fR for snapshot ID "latest"
|
||||
|
||||
.PP
|
||||
\fB\-\-tag\fP=[]
|
||||
only consider snapshots which include this \fB\fCtaglist\fR for snapshot ID "latest"
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -58,10 +58,30 @@ repo.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -66,6 +66,10 @@ data after 'forget' was run successfully, see the 'prune' command.
|
||||
\fB\-\-path\fP=[]
|
||||
only consider snapshots which include this (absolute) \fB\fCpath\fR (can be specified multiple times)
|
||||
|
||||
.PP
|
||||
\fB\-c\fP, \fB\-\-compact\fP[=false]
|
||||
use compact format
|
||||
|
||||
.PP
|
||||
\fB\-g\fP, \fB\-\-group\-by\fP="host,paths"
|
||||
string for grouping snapshots by host,paths,tags
|
||||
@@ -84,10 +88,30 @@ data after 'forget' was run successfully, see the 'prune' command.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
doc/man/restic-generate.1 (new file, +88 lines)
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
|
||||
.SH NAME
|
||||
.PP
|
||||
restic\-generate \- Generate manual pages and auto\-completion files (bash, zsh)
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
\fBrestic generate [command] [flags]\fP
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
The "generate" command writes automatically generated files like the man pages
|
||||
and the auto\-completion files for bash and zsh.
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
.PP
|
||||
\fB\-\-bash\-completion\fP=""
|
||||
write bash completion \fB\fCfile\fR
|
||||
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for generate
|
||||
|
||||
.PP
|
||||
\fB\-\-man\fP=""
|
||||
write man pages to \fB\fCdirectory\fR
|
||||
|
||||
.PP
|
||||
\fB\-\-zsh\-completion\fP=""
|
||||
write zsh completion \fB\fCfile\fR
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
||||
.PP
|
||||
\fB\-o\fP, \fB\-\-option\fP=[]
|
||||
set extended option (\fB\fCkey=value\fR, can be specified multiple times)
|
||||
|
||||
.PP
|
||||
\fB\-p\fP, \fB\-\-password\-file\fP=""
|
||||
read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE)
|
||||
|
||||
.PP
|
||||
\fB\-q\fP, \fB\-\-quiet\fP[=false]
|
||||
do not output comprehensive progress report
|
||||
|
||||
.PP
|
||||
\fB\-r\fP, \fB\-\-repo\fP=""
|
||||
repository to backup to or restore from (default: $RESTIC\_REPOSITORY)
|
||||
|
||||
|
||||
.SH SEE ALSO
|
||||
.PP
|
||||
\fBrestic(1)\fP
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -25,10 +25,30 @@ The "init" command initializes a new repository.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -25,10 +25,30 @@ The "key" command manages keys (passwords) for accessing the repository.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -25,10 +25,30 @@ The "list" command allows listing objects in the repository based on type.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -44,10 +44,30 @@ The special snapshot\-ID "latest" can be used to list files and directories of t
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,61 +0,0 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
|
||||
.SH NAME
|
||||
.PP
|
||||
restic\-manpage \- Generate manual pages
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
\fBrestic manpage [command] [flags]\fP
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
The "manpage" command generates a manual page for a single command. It can also
|
||||
be used to write all manual pages to a directory. If the output directory is
|
||||
set and no command is specified, all manpages are written to the directory.
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for manpage
|
||||
|
||||
.PP
|
||||
\fB\-\-output\-dir\fP=""
|
||||
write man pages to this \fB\fCdirectory\fR
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
||||
.PP
|
||||
\fB\-o\fP, \fB\-\-option\fP=[]
|
||||
set extended option (\fB\fCkey=value\fR, can be specified multiple times)
|
||||
|
||||
.PP
|
||||
\fB\-p\fP, \fB\-\-password\-file\fP=""
|
||||
read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE)
|
||||
|
||||
.PP
|
||||
\fB\-q\fP, \fB\-\-quiet\fP[=false]
|
||||
do not output comprehensive progress report
|
||||
|
||||
.PP
|
||||
\fB\-r\fP, \fB\-\-repo\fP=""
|
||||
repository to backup to or restore from (default: $RESTIC\_REPOSITORY)
|
||||
|
||||
|
||||
.SH SEE ALSO
|
||||
.PP
|
||||
\fBrestic(1)\fP
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -30,10 +30,30 @@ name is explicitly given, a list of migrations that can be applied is printed.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -50,10 +50,30 @@ read\-only mount.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -26,10 +26,30 @@ referenced and therefore not needed any more.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -26,10 +26,30 @@ repository.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -54,10 +54,30 @@ repository.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -31,6 +31,10 @@ The "snapshots" command lists all snapshots stored in the repository.
|
||||
\fB\-H\fP, \fB\-\-host\fP=""
|
||||
only consider snapshots for this \fB\fChost\fR
|
||||
|
||||
.PP
|
||||
\fB\-\-last\fP[=false]
|
||||
only show the last snapshot for each host and path
|
||||
|
||||
.PP
|
||||
\fB\-\-path\fP=[]
|
||||
only consider snapshots for this \fB\fCpath\fR (can be specified multiple times)
|
||||
@@ -41,10 +45,30 @@ The "snapshots" command lists all snapshots stored in the repository.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -56,10 +56,30 @@ When no snapshot\-ID is given, all snapshots matching the host, tag and path fil
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -29,10 +29,30 @@ The "unlock" command removes stale locks that have been created by other restic
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -26,10 +26,30 @@ and the version of this software.
|
||||
|
||||
|
||||
.SH OPTIONS INHERITED FROM PARENT COMMANDS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic manpage`" ""
|
||||
.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" ""
|
||||
.nh
|
||||
.ad l
|
||||
|
||||
@@ -20,6 +20,14 @@ directories in an encrypted repository stored on different backends.
|
||||
|
||||
|
||||
.SH OPTIONS
|
||||
.PP
|
||||
\fB\-\-cacert\fP=[]
|
||||
path to load root certificates from (default: use system certificates)
|
||||
|
||||
.PP
|
||||
\fB\-\-cache\-dir\fP=""
|
||||
set the cache directory
|
||||
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for restic
|
||||
@@ -28,6 +36,18 @@ directories in an encrypted repository stored on different backends.
|
||||
\fB\-\-json\fP[=false]
|
||||
set output mode to JSON for commands that support it
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-download\fP=0
|
||||
limits downloads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-limit\-upload\fP=0
|
||||
limits uploads to a maximum rate in KiB/s. (default: unlimited)
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-cache\fP[=false]
|
||||
do not use a local cache
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-lock\fP[=false]
|
||||
do not lock the repo, this allows some operations on read\-only repos
|
||||
@@ -51,4 +71,4 @@ directories in an encrypted repository stored on different backends.
|
||||
|
||||
.SH SEE ALSO
|
||||
.PP
|
||||
\fBrestic\-autocomplete(1)\fP, \fBrestic\-backup(1)\fP, \fBrestic\-cat(1)\fP, \fBrestic\-check(1)\fP, \fBrestic\-dump(1)\fP, \fBrestic\-find(1)\fP, \fBrestic\-forget(1)\fP, \fBrestic\-init(1)\fP, \fBrestic\-key(1)\fP, \fBrestic\-list(1)\fP, \fBrestic\-ls(1)\fP, \fBrestic\-manpage(1)\fP, \fBrestic\-migrate(1)\fP, \fBrestic\-mount(1)\fP, \fBrestic\-prune(1)\fP, \fBrestic\-rebuild\-index(1)\fP, \fBrestic\-restore(1)\fP, \fBrestic\-snapshots(1)\fP, \fBrestic\-tag(1)\fP, \fBrestic\-unlock(1)\fP, \fBrestic\-version(1)\fP
|
||||
\fBrestic\-backup(1)\fP, \fBrestic\-cat(1)\fP, \fBrestic\-check(1)\fP, \fBrestic\-dump(1)\fP, \fBrestic\-find(1)\fP, \fBrestic\-forget(1)\fP, \fBrestic\-generate(1)\fP, \fBrestic\-init(1)\fP, \fBrestic\-key(1)\fP, \fBrestic\-list(1)\fP, \fBrestic\-ls(1)\fP, \fBrestic\-migrate(1)\fP, \fBrestic\-mount(1)\fP, \fBrestic\-prune(1)\fP, \fBrestic\-rebuild\-index(1)\fP, \fBrestic\-restore(1)\fP, \fBrestic\-snapshots(1)\fP, \fBrestic\-tag(1)\fP, \fBrestic\-unlock(1)\fP, \fBrestic\-version(1)\fP
|
||||
|
1205
doc/manual.rst
File diff suppressed because it is too large
282
doc/manual_rest.rst
Normal file
@@ -0,0 +1,282 @@
|
||||
Manual
|
||||
======
|
||||
|
||||
Usage help
|
||||
----------
|
||||
|
||||
Usage help is available:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./restic --help
|
||||
restic is a backup program which allows saving multiple revisions of files and
|
||||
directories in an encrypted repository stored on different backends.
|
||||
|
||||
Usage:
|
||||
restic [command]
|
||||
|
||||
Available Commands:
|
||||
autocomplete Generate shell autocompletion script
|
||||
backup Create a new backup of files and/or directories
|
||||
cat Print internal objects to stdout
|
||||
check Check the repository for errors
|
||||
dump Dump data structures
|
||||
find Find a file or directory
|
||||
forget Remove snapshots from the repository
|
||||
help Help about any command
|
||||
init Initialize a new repository
|
||||
key Manage keys (passwords)
|
||||
list List items in the repository
|
||||
ls List files in a snapshot
|
||||
mount Mount the repository
|
||||
prune Remove unneeded data from the repository
|
||||
rebuild-index Build a new index file
|
||||
restore Extract the data from a snapshot
|
||||
snapshots List all snapshots
|
||||
tag Modify tags on snapshots
|
||||
unlock Remove locks other processes created
|
||||
version Print version information
|
||||
|
||||
Flags:
|
||||
--json set output mode to JSON for commands that support it
|
||||
--no-lock do not lock the repo, this allows some operations on read-only repos
|
||||
-p, --password-file string read the repository password from a file
|
||||
-q, --quiet do not output comprehensive progress report
|
||||
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
|
||||
|
||||
Use "restic [command] --help" for more information about a command.
|
||||
|
||||
Similar to programs such as ``git``, restic has a number of
|
||||
sub-commands. You can see these commands in the listing above. Each
|
||||
sub-command may have its own command-line options, and there is a help
|
||||
option for each command which lists them, e.g. for the ``backup``
|
||||
command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./restic backup --help
|
||||
The "backup" command creates a new snapshot and saves the files and directories
|
||||
given as the arguments.
|
||||
|
||||
Usage:
|
||||
restic backup [flags] FILE/DIR [FILE/DIR] ...
|
||||
|
||||
Flags:
|
||||
-e, --exclude pattern exclude a pattern (can be specified multiple times)
|
||||
--exclude-file string read exclude patterns from a file
|
||||
--files-from string read the files to backup from file (can be combined with file args)
|
||||
-f, --force force re-reading the target files/directories. Overrides the "parent" flag
|
||||
-x, --one-file-system Exclude other file systems
|
||||
--parent string use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)
|
||||
--stdin read backup from stdin
|
||||
--stdin-filename string file name to use when reading from stdin
|
||||
--tag tag add a tag for the new snapshot (can be specified multiple times)
|
||||
--time string time of the backup (ex. '2012-11-01 22:08:41') (default: now)
|
||||
|
||||
Global Flags:
|
||||
--json set output mode to JSON for commands that support it
|
||||
--no-lock do not lock the repo, this allows some operations on read-only repos
|
||||
-p, --password-file string read the repository password from a file
|
||||
-q, --quiet do not output comprehensive progress report
|
||||
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
|
||||
|
||||
Subcommands that support showing progress information, such as ``backup``,
|
||||
``check`` and ``prune`` will do so unless the quiet flag ``-q`` or
|
||||
``--quiet`` is set. When running from a non-interactive console, progress
|
||||
reporting will be limited to once every 10 seconds so as not to fill your
|
||||
logs.
|
||||
|
||||
Additionally, on Unix systems, if ``restic`` receives a SIGUSR1 signal, the
|
||||
current progress will be written to the standard output so you can check up
|
||||
on the status at will.
|
||||
|
||||
Manage tags
|
||||
-----------
|
||||
|
||||
Managing tags on snapshots is done with the ``tag`` command. The
|
||||
existing set of tags can be replaced completely, and tags can be added or
|
||||
removed. The result is directly visible in the ``snapshots`` command.
|
||||
|
||||
Let's say we want to tag snapshot ``590c8fc8`` with the tags ``NL`` and
|
||||
``CH`` and remove all other tags that may be present; the following
|
||||
command does that:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup tag --set NL --set CH 590c8fc8
|
||||
create exclusive lock for repository
|
||||
modified tags on 1 snapshots
|
||||
|
||||
Note the snapshot ID has changed, so between each change we need to look
|
||||
up the new ID of the snapshot. But there is an even better way: the
|
||||
``tag`` command accepts ``--tag`` for a filter, so we can filter
|
||||
snapshots based on the tag we just added.
|
||||
|
||||
So we can add and remove tags incrementally like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --remove CH
|
||||
create exclusive lock for repository
|
||||
modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --add UK
|
||||
create exclusive lock for repository
|
||||
modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --remove NL
|
||||
create exclusive lock for repository
|
||||
modified tags on 1 snapshots
|
||||
|
||||
$ restic -r /tmp/backup tag --tag NL --add SOMETHING
|
||||
no snapshots were modified
|
||||
|
||||
Under the hood
|
||||
--------------
|
||||
|
||||
Browse repository objects
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Internally, a repository stores data of several different types
|
||||
described in the `design
|
||||
documentation <https://github.com/restic/restic/blob/master/doc/Design.rst>`__.
|
||||
You can ``list`` objects such as blobs, packs, index, snapshots, keys or
|
||||
locks with the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup list snapshots
|
||||
d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
|
||||
|
||||
The ``find`` command searches for a given
|
||||
`pattern <http://golang.org/pkg/path/filepath/#Match>`__ in the
|
||||
repository.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r backup find test.txt
|
||||
debug log file restic.log
|
||||
debug enabled
|
||||
enter password for repository:
|
||||
found 1 matching entries in snapshot 196bc5760c909a7681647949e80e5448e276521489558525680acf1bd428af36
|
||||
-rw-r--r-- 501 20 5 2015-08-26 14:09:57 +0200 CEST path/to/test.txt
|
||||
|
||||
The ``cat`` command allows you to display the JSON representation of the
|
||||
objects or their raw content.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
|
||||
enter password for repository:
|
||||
{
|
||||
"time": "2015-08-12T12:52:44.091448856+02:00",
|
||||
"tree": "05cec17e8d3349f402576d02576a2971fc0d9f9776ce2f441c7010849c4ff5af",
|
||||
"paths": [
|
||||
"/home/user/work"
|
||||
],
|
||||
"hostname": "kasimir",
|
||||
"username": "username",
|
||||
"uid": 501,
|
||||
"gid": 20
|
||||
}
|
||||
|
||||
Metadata handling
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Restic saves and restores most default attributes, including extended attributes like ACLs.
|
||||
Sparse files are not handled in a special way yet, and aren't restored.
|
||||
|
||||
The following metadata is handled by restic:
|
||||
|
||||
- Name
|
||||
- Type
|
||||
- Mode
|
||||
- ModTime
|
||||
- AccessTime
|
||||
- ChangeTime
|
||||
- UID
|
||||
- GID
|
||||
- User
|
||||
- Group
|
||||
- Inode
|
||||
- Size
|
||||
- Links
|
||||
- LinkTarget
|
||||
- Device
|
||||
- Content
|
||||
- Subtree
|
||||
- ExtendedAttributes
|
||||
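A rough sketch of how one such entry could be modeled in Go follows; the
field names simply mirror the list above and are illustrative, not restic's
internal types.

.. code-block:: go

package metadata

import (
	"os"
	"time"
)

// Node is an illustrative sketch of the per-entry metadata listed above;
// it is not restic's actual internal type.
type Node struct {
	Name               string
	Type               string // e.g. "file", "dir", "symlink"
	Mode               os.FileMode
	ModTime            time.Time
	AccessTime         time.Time
	ChangeTime         time.Time
	UID                uint32
	GID                uint32
	User               string
	Group              string
	Inode              uint64
	Size               uint64
	Links              uint64
	LinkTarget         string
	Device             uint64
	Content            []string // IDs of the blobs holding the file content
	Subtree            string   // ID of the subtree (for directories)
	ExtendedAttributes map[string][]byte
}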
|
||||
Scripting
|
||||
---------
|
||||
|
||||
Restic supports the output of some commands in JSON format; the JSON
|
||||
data can then be processed by other programs (e.g.
|
||||
`jq <https://stedolan.github.io/jq/>`__). The following example
|
||||
lists all snapshots as JSON and uses ``jq`` to pretty-print the result:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /tmp/backup snapshots --json | jq .
|
||||
[
|
||||
{
|
||||
"time": "2017-03-11T09:57:43.26630619+01:00",
|
||||
"tree": "bf25241679533df554fc0fd0ae6dbb9dcf1859a13f2bc9dd4543c354eff6c464",
|
||||
"paths": [
|
||||
"/home/work/doc"
|
||||
],
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"id": "bbeed6d28159aa384d1ccc6fa0b540644b1b9599b162d2972acda86b1b80f89e"
|
||||
},
|
||||
{
|
||||
"time": "2017-03-11T09:58:57.541446938+01:00",
|
||||
"tree": "7f8c95d3420baaac28dc51609796ae0e0ecfb4862b609a9f38ffaf7ae2d758da",
|
||||
"paths": [
|
||||
"/home/user/shared"
|
||||
],
|
||||
"hostname": "kasimir",
|
||||
"username": "fd0",
|
||||
"uid": 1000,
|
||||
"gid": 100,
|
||||
"id": "b157d91c16f0ba56801ece3a708dfc53791fe2a97e827090d6ed9a69a6ebdca0"
|
||||
}
|
||||
]
|
||||
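The JSON output is also easy to consume from another program directly. A
minimal Go sketch that runs restic and decodes the snapshot list (the
repository path and password handling, e.g. via ``$RESTIC_PASSWORD``, are
assumptions of this example):

.. code-block:: go

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
	"time"
)

// Snapshot mirrors the fields shown in the JSON output above.
type Snapshot struct {
	Time     time.Time `json:"time"`
	Tree     string    `json:"tree"`
	Paths    []string  `json:"paths"`
	Hostname string    `json:"hostname"`
	Username string    `json:"username"`
	ID       string    `json:"id"`
}

func main() {
	// Run restic and capture the JSON snapshot list.
	out, err := exec.Command("restic", "-r", "/tmp/backup", "snapshots", "--json").Output()
	if err != nil {
		log.Fatal(err)
	}

	var snapshots []Snapshot
	if err := json.Unmarshal(out, &snapshots); err != nil {
		log.Fatal(err)
	}

	// Print a short summary line per snapshot.
	for _, sn := range snapshots {
		fmt.Printf("%.8s %s %v\n", sn.ID, sn.Time.Format("2006-01-02 15:04"), sn.Paths)
	}
}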
|
||||
Temporary files
|
||||
---------------
|
||||
|
||||
During some operations (e.g. ``backup`` and ``prune``) restic uses
|
||||
temporary files to store data. These files will, by default, be saved to
|
||||
the system's temporary directory; on Linux this is usually located in
|
||||
``/tmp/``. The environment variable ``TMPDIR`` can be used to specify a
|
||||
different directory. For example, to use the directory ``/var/tmp/restic-tmp``
|
||||
instead of the default, set the environment variable like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export TMPDIR=/var/tmp/restic-tmp
|
||||
$ restic -r /tmp/backup backup ~/work
|
||||
|
||||
|
||||
|
||||
Caching
|
||||
-------
|
||||
|
||||
Restic keeps a cache with some files from the repository on the local machine.
|
||||
This allows faster operations, since metadata does not need to be loaded from
|
||||
a remote repository. The cache is automatically created, usually in an
|
||||
OS-specific cache folder:
|
||||
|
||||
* Linux/other: ``~/.cache/restic`` (or ``$XDG_CACHE_HOME/restic``)
|
||||
* macOS: ``~/Library/Caches/restic``
|
||||
* Windows: ``%LOCALAPPDATA%/restic``
|
||||
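As a rough illustration (not restic's actual implementation), the default
locations above could be resolved in Go like this:

.. code-block:: go

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// defaultCacheDir resolves the OS-specific cache locations listed above.
func defaultCacheDir() string {
	switch runtime.GOOS {
	case "darwin":
		return filepath.Join(os.Getenv("HOME"), "Library", "Caches", "restic")
	case "windows":
		return filepath.Join(os.Getenv("LOCALAPPDATA"), "restic")
	default:
		// Linux and other Unix-like systems: honour XDG_CACHE_HOME if set.
		if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
			return filepath.Join(xdg, "restic")
		}
		return filepath.Join(os.Getenv("HOME"), ".cache", "restic")
	}
}

func main() {
	fmt.Println(defaultCacheDir())
}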
|
||||
The command line parameter ``--cache-dir`` can be used to override the
|
||||
default cache location. The parameter ``--no-cache`` disables the cache
|
||||
entirely. In this case, all data is loaded from the repo.
|
||||
|
||||
The cache is ephemeral: When a file cannot be read from the cache, it is loaded
|
||||
from the repository.
|
@@ -1,9 +0,0 @@
|
||||
==========
|
||||
References
|
||||
==========
|
||||
|
||||
.. include:: design.rst
|
||||
|
||||
------------------------
|
||||
|
||||
.. include:: rest_backend.rst
|
@@ -1,84 +0,0 @@
|
||||
REST Backend
|
||||
============
|
||||
|
||||
Restic can interact with an HTTP backend that respects the following REST
|
||||
API. The following values are valid for ``{type}``: ``data``, ``keys``,
|
||||
``locks``, ``snapshots``, ``index``, ``config``. ``{path}`` is a path to
|
||||
the repository, so that multiple different repositories can be accessed.
|
||||
The default path is ``/``.
|
||||
|
||||
POST {path}?create=true
|
||||
-----------------------
|
||||
|
||||
This request is used to initially create a new repository. The server
|
||||
responds with "200 OK" if the repository structure was created
|
||||
successfully or already exists; otherwise, an error is returned.
|
||||
|
||||
DELETE {path}
|
||||
-------------
|
||||
|
||||
Deletes the repository on the server side. The server responds with "200
|
||||
OK" if the repository was successfully removed. If this function is not
|
||||
implemented, the server returns "501 Not Implemented"; if it is
|
||||
denied by the server it returns "403 Forbidden".
|
||||
|
||||
HEAD {path}/config
|
||||
------------------
|
||||
|
||||
Returns "200 OK" if the repository has a configuration, an HTTP error
|
||||
otherwise.
|
||||
|
||||
GET {path}/config
|
||||
-----------------
|
||||
|
||||
Returns the content of the configuration file if the repository has a
|
||||
configuration, an HTTP error otherwise.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/config
|
||||
------------------
|
||||
|
||||
Returns "200 OK" if the configuration of the request body has been
|
||||
saved, an HTTP error otherwise.
|
||||
|
||||
GET {path}/{type}/
|
||||
------------------
|
||||
|
||||
Returns a JSON array containing the names of all the blobs stored for a
|
||||
given type.
|
||||
|
||||
Response format: JSON
|
||||
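As an illustration of the endpoint just described, a minimal Go client that
lists all snapshot files of a repository served at an assumed address
(``http://localhost:8000``, repository path ``/``):

.. code-block:: go

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Base URL of a REST server; address and port are assumptions of this sketch.
	base := "http://localhost:8000"

	// GET {path}/{type}/ returns a JSON array with the names of all blobs
	// of the given type, here the snapshot files.
	resp, err := http.Get(base + "/snapshots/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %v", resp.Status)
	}

	var names []string
	if err := json.NewDecoder(resp.Body).Decode(&names); err != nil {
		log.Fatal(err)
	}

	for _, name := range names {
		fmt.Println(name)
	}
}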
|
||||
HEAD {path}/{type}/{name}
|
||||
-------------------------
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type is stored in
|
||||
the repository, "404 not found" otherwise. If the blob exists, the HTTP
|
||||
header ``Content-Length`` is set to the file size.
|
||||
|
||||
GET {path}/{type}/{name}
|
||||
------------------------
|
||||
|
||||
Returns the content of the blob with the given name and type if it is
|
||||
stored in the repository, "404 not found" otherwise.
|
||||
|
||||
If the request specifies a partial read with a Range header field, then
|
||||
the status code of the response is 206 instead of 200 and the response
|
||||
only contains the specified range.
|
||||
|
||||
Response format: binary/octet-stream
|
||||
|
||||
POST {path}/{type}/{name}
|
||||
-------------------------
|
||||
|
||||
Saves the content of the request body as a blob with the given name and
|
||||
type. The server responds with "200 OK" on success, an HTTP error otherwise.
|
||||
|
||||
Request format: binary/octet-stream
|
||||
|
||||
DELETE {path}/{type}/{name}
|
||||
---------------------------
|
||||
|
||||
Returns "200 OK" if the blob with the given name and type has been
|
||||
deleted from the repository, an HTTP error otherwise.
|
@@ -1,20 +0,0 @@
|
||||
Talks
|
||||
=====
|
||||
|
||||
The following talks will be or have been given about restic:
|
||||
|
||||
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
|
||||
Brussels, Belgium
|
||||
- 2016-01-29: `restic - Backups mal
|
||||
richtig <https://media.ccc.de/v/c4.openchaos.2016.01.restic>`__:
|
||||
Public lecture in German at `CCC Cologne
|
||||
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
|
||||
- 2015-08-23: `A Solution to the Backup
|
||||
Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
|
||||
Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
|
||||
- 2015-02-01: `Lightning Talk at FOSDEM
|
||||
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
|
||||
short introduction (with slightly outdated command line)
|
||||
- 2015-01-27: `Talk about restic at CCC
|
||||
Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
|
||||
(in German)
|
@@ -1,5 +0,0 @@
|
||||
==========
|
||||
Tutorials
|
||||
==========
|
||||
|
||||
.. include:: tutorial_aws_s3.rst
|
20
doc/zsh-completion.zsh
Normal file
@@ -0,0 +1,20 @@
|
||||
#compdef restic
|
||||
|
||||
_arguments \
|
||||
'1: :->level1' \
|
||||
'2: :_files'
|
||||
case $state in
|
||||
level1)
|
||||
case $words[1] in
|
||||
restic)
|
||||
_arguments '1: :(backup cat check dump find forget generate help init key list ls migrate mount options prune rebuild-index restore snapshots tag unlock version)'
|
||||
;;
|
||||
*)
|
||||
_arguments '*: :_files'
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
_arguments '*: :_files'
|
||||
;;
|
||||
esac
|
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/restic/restic/internal/crypto"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
. "github.com/restic/restic/internal/test"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
|
||||
@@ -32,6 +32,7 @@ type Rdr interface {
|
||||
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
|
||||
rd.Seek(0, 0)
|
||||
ch := chunker.New(rd, testPol)
|
||||
nonce := crypto.NewRandomNonce()
|
||||
|
||||
for {
|
||||
chunk, err := ch.Next(buf)
|
||||
@@ -40,14 +41,12 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
|
||||
break
|
||||
}
|
||||
|
||||
OK(b, err)
|
||||
rtest.OK(b, err)
|
||||
|
||||
// reduce length of buf
|
||||
Assert(b, uint(len(chunk.Data)) == chunk.Length,
|
||||
rtest.Assert(b, uint(len(chunk.Data)) == chunk.Length,
|
||||
"invalid length: got %d, expected %d", len(chunk.Data), chunk.Length)
|
||||
|
||||
_, err = key.Encrypt(buf2, chunk.Data)
|
||||
OK(b, err)
|
||||
_ = key.Seal(buf2[:0], nonce, chunk.Data, nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,7 +54,7 @@ func BenchmarkChunkEncrypt(b *testing.B) {
|
||||
repo, cleanup := repository.TestRepository(b)
|
||||
defer cleanup()
|
||||
|
||||
data := Random(23, 10<<20) // 10MiB
|
||||
data := rtest.Random(23, 10<<20) // 10MiB
|
||||
rd := bytes.NewReader(data)
|
||||
|
||||
buf := make([]byte, chunker.MaxSize)
|
||||
@@ -71,6 +70,7 @@ func BenchmarkChunkEncrypt(b *testing.B) {
|
||||
|
||||
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
|
||||
ch := chunker.New(rd, testPol)
|
||||
nonce := crypto.NewRandomNonce()
|
||||
|
||||
for {
|
||||
chunk, err := ch.Next(buf)
|
||||
@@ -78,8 +78,7 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
|
||||
break
|
||||
}
|
||||
|
||||
// reduce length of chunkBuf
|
||||
key.Encrypt(chunk.Data, chunk.Data)
|
||||
_ = key.Seal(chunk.Data[:0], nonce, chunk.Data, nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,7 +86,7 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
|
||||
repo, cleanup := repository.TestRepository(b)
|
||||
defer cleanup()
|
||||
|
||||
data := Random(23, 10<<20) // 10MiB
|
||||
data := rtest.Random(23, 10<<20) // 10MiB
|
||||
|
||||
buf := make([]byte, chunker.MaxSize)
|
||||
|
||||
@@ -108,14 +107,14 @@ func archiveDirectory(b testing.TB) {
|
||||
|
||||
arch := archiver.New(repo)
|
||||
|
||||
_, id, err := arch.Snapshot(context.TODO(), nil, []string{BenchArchiveDirectory}, nil, "localhost", nil, time.Now())
|
||||
OK(b, err)
|
||||
_, id, err := arch.Snapshot(context.TODO(), nil, []string{rtest.BenchArchiveDirectory}, nil, "localhost", nil, time.Now())
|
||||
rtest.OK(b, err)
|
||||
|
||||
b.Logf("snapshot archived as %v", id)
|
||||
}
|
||||
|
||||
func TestArchiveDirectory(t *testing.T) {
|
||||
if BenchArchiveDirectory == "" {
|
||||
if rtest.BenchArchiveDirectory == "" {
|
||||
t.Skip("benchdir not set, skipping TestArchiveDirectory")
|
||||
}
|
||||
|
||||
@@ -123,7 +122,7 @@ func TestArchiveDirectory(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkArchiveDirectory(b *testing.B) {
|
||||
if BenchArchiveDirectory == "" {
|
||||
if rtest.BenchArchiveDirectory == "" {
|
||||
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
|
||||
}
|
||||
|
||||
@@ -144,7 +143,7 @@ func archiveWithDedup(t testing.TB) {
|
||||
repo, cleanup := repository.TestRepository(t)
|
||||
defer cleanup()
|
||||
|
||||
if BenchArchiveDirectory == "" {
|
||||
if rtest.BenchArchiveDirectory == "" {
|
||||
t.Skip("benchdir not set, skipping TestArchiverDedup")
|
||||
}
|
||||
|
||||
@@ -155,7 +154,7 @@ func archiveWithDedup(t testing.TB) {
|
||||
}
|
||||
|
||||
// archive a few files
|
||||
sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
|
||||
sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
|
||||
t.Logf("archived snapshot %v", sn.ID().Str())
|
||||
|
||||
// get archive stats
|
||||
@@ -166,7 +165,7 @@ func archiveWithDedup(t testing.TB) {
|
||||
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
|
||||
|
||||
// archive the same files again, without parent snapshot
|
||||
sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
|
||||
sn2 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
|
||||
t.Logf("archived snapshot %v", sn2.ID().Str())
|
||||
|
||||
// get archive stats again
|
||||
@@ -183,7 +182,7 @@ func archiveWithDedup(t testing.TB) {
|
||||
}
|
||||
|
||||
// archive the same files again, with a parent snapshot
|
||||
sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
|
||||
sn3 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, sn2.ID())
|
||||
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
|
||||
|
||||
// get archive stats again
|
||||
@@ -246,18 +245,18 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
|
||||
}
|
||||
|
||||
for _, errChan := range errChannels {
|
||||
OK(t, <-errChan)
|
||||
rtest.OK(t, <-errChan)
|
||||
}
|
||||
|
||||
OK(t, repo.Flush())
|
||||
OK(t, repo.SaveIndex(context.TODO()))
|
||||
rtest.OK(t, repo.Flush())
|
||||
rtest.OK(t, repo.SaveIndex(context.TODO()))
|
||||
|
||||
chkr := createAndInitChecker(t, repo)
|
||||
assertNoUnreferencedPacks(t, chkr)
|
||||
}
|
||||
|
||||
func getRandomData(seed int, size int) []chunker.Chunk {
|
||||
buf := Random(seed, size)
|
||||
buf := rtest.Random(seed, size)
|
||||
var chunks []chunker.Chunk
|
||||
chunker := chunker.New(bytes.NewReader(buf), testPol)
|
||||
|
||||
@@ -292,7 +291,7 @@ func assertNoUnreferencedPacks(t *testing.T, chkr *checker.Checker) {
|
||||
go chkr.Packs(context.TODO(), errChan)
|
||||
|
||||
for err := range errChan {
|
||||
OK(t, err)
|
||||
rtest.OK(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -341,26 +340,26 @@ func TestArchiveNameCollision(t *testing.T) {
|
||||
repo, cleanup := repository.TestRepository(t)
|
||||
defer cleanup()
|
||||
|
||||
dir, cleanup := TempDir(t)
|
||||
dir, cleanup := rtest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
root := filepath.Join(dir, "root")
|
||||
OK(t, os.MkdirAll(root, 0755))
|
||||
rtest.OK(t, os.MkdirAll(root, 0755))
|
||||
|
||||
OK(t, ioutil.WriteFile(filepath.Join(dir, "testfile"), []byte("testfile1"), 0644))
|
||||
OK(t, ioutil.WriteFile(filepath.Join(dir, "root", "testfile"), []byte("testfile2"), 0644))
|
||||
rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "testfile"), []byte("testfile1"), 0644))
|
||||
rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "root", "testfile"), []byte("testfile2"), 0644))
|
||||
|
||||
defer chdir(t, root)()
|
||||
|
||||
arch := archiver.New(repo)
|
||||
|
||||
sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"testfile", filepath.Join("..", "testfile")}, nil, "localhost", nil, time.Now())
|
||||
OK(t, err)
|
||||
rtest.OK(t, err)
|
||||
|
||||
t.Logf("snapshot archived as %v", id)
|
||||
|
||||
tree, err := repo.LoadTree(context.TODO(), *sn.Tree)
|
||||
OK(t, err)
|
||||
rtest.OK(t, err)
|
||||
|
||||
if len(tree.Nodes) != 2 {
|
||||
t.Fatalf("tree has %d nodes, wanted 2: %v", len(tree.Nodes), tree.Nodes)
|
||||
|
@@ -30,7 +30,7 @@ const defaultListMaxItems = 5000
|
||||
// make sure that *Backend implements backend.Backend
|
||||
var _ restic.Backend = &Backend{}
|
||||
|
||||
func open(cfg Config) (*Backend, error) {
|
||||
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
debug.Log("open, config %#v", cfg)
|
||||
|
||||
client, err := storage.NewBasicClient(cfg.AccountName, cfg.AccountKey)
|
||||
@@ -38,7 +38,7 @@ func open(cfg Config) (*Backend, error) {
|
||||
return nil, errors.Wrap(err, "NewBasicClient")
|
||||
}
|
||||
|
||||
client.HTTPClient = &http.Client{Transport: backend.Transport()}
|
||||
client.HTTPClient = &http.Client{Transport: rt}
|
||||
|
||||
service := client.GetBlobService()
|
||||
|
||||
@@ -63,14 +63,14 @@ func open(cfg Config) (*Backend, error) {
|
||||
}
|
||||
|
||||
// Open opens the Azure backend at specified container.
|
||||
func Open(cfg Config) (restic.Backend, error) {
|
||||
return open(cfg)
|
||||
func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
return open(cfg, rt)
|
||||
}
|
||||
|
||||
// Create opens the Azure backend at specified container and creates the container if
|
||||
// it does not exist yet.
|
||||
func Create(cfg Config) (restic.Backend, error) {
|
||||
be, err := open(cfg)
|
||||
func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
be, err := open(cfg, rt)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "open")
|
||||
@@ -227,13 +227,17 @@ func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
|
||||
}
|
||||
|
||||
// Stat returns information about a blob.
|
||||
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
|
||||
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
|
||||
debug.Log("%v", h)
|
||||
|
||||
objName := be.Filename(h)
|
||||
blob := be.container.GetBlobReference(objName)
|
||||
|
||||
if err := blob.GetProperties(nil); err != nil {
|
||||
be.sem.GetToken()
|
||||
err := blob.GetProperties(nil)
|
||||
be.sem.ReleaseToken()
|
||||
|
||||
if err != nil {
|
||||
debug.Log("blob.GetProperties err %v", err)
|
||||
return restic.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
|
||||
}
|
||||
@@ -244,7 +248,11 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
|
||||
// Test returns true if a blob of the given type and name exists in the backend.
|
||||
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
|
||||
objName := be.Filename(h)
|
||||
|
||||
be.sem.GetToken()
|
||||
found, err := be.container.GetBlobReference(objName).Exists()
|
||||
be.sem.ReleaseToken()
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -254,7 +262,11 @@ func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
|
||||
// Remove removes the blob with the given name and type.
|
||||
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
|
||||
objName := be.Filename(h)
|
||||
|
||||
be.sem.GetToken()
|
||||
_, err := be.container.GetBlobReference(objName).DeleteIfExists(nil)
|
||||
be.sem.ReleaseToken()
|
||||
|
||||
debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
|
||||
return errors.Wrap(err, "client.RemoveObject")
|
||||
}
|
||||
@@ -282,7 +294,10 @@ func (be *Backend) List(ctx context.Context, t restic.FileType) <-chan string {
|
||||
defer close(ch)
|
||||
|
||||
for {
|
||||
be.sem.GetToken()
|
||||
obj, err := be.container.ListBlobs(params)
|
||||
be.sem.ReleaseToken()
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@@ -7,14 +7,20 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/azure"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
. "github.com/restic/restic/internal/test"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newAzureTestSuite(t testing.TB) *test.Suite {
|
||||
tr, err := backend.Transport(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
return &test.Suite{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
@@ -37,7 +43,7 @@ func newAzureTestSuite(t testing.TB) *test.Suite {
|
||||
Create: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(azure.Config)
|
||||
|
||||
be, err := azure.Create(cfg)
|
||||
be, err := azure.Create(cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -57,23 +63,20 @@ func newAzureTestSuite(t testing.TB) *test.Suite {
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(azure.Config)
|
||||
return azure.Open(cfg)
|
||||
|
||||
return azure.Open(cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(config interface{}) error {
|
||||
cfg := config.(azure.Config)
|
||||
|
||||
be, err := azure.Open(cfg)
|
||||
be, err := azure.Open(cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -81,7 +84,7 @@ func newAzureTestSuite(t testing.TB) *test.Suite {
|
||||
func TestBackendAzure(t *testing.T) {
|
||||
defer func() {
|
||||
if t.Skipped() {
|
||||
SkipDisallowed(t, "restic/backend/azure.TestBackendAzure")
|
||||
rtest.SkipDisallowed(t, "restic/backend/azure.TestBackendAzure")
|
||||
}
|
||||
}()
|
||||
|
||||
|
@@ -3,6 +3,7 @@ package b2
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
@@ -29,8 +30,8 @@ const defaultListMaxItems = 1000
|
||||
// ensure statically that *b2Backend implements restic.Backend.
|
||||
var _ restic.Backend = &b2Backend{}
|
||||
|
||||
func newClient(ctx context.Context, cfg Config) (*b2.Client, error) {
|
||||
opts := []b2.ClientOption{b2.Transport(backend.Transport())}
|
||||
func newClient(ctx context.Context, cfg Config, rt http.RoundTripper) (*b2.Client, error) {
|
||||
opts := []b2.ClientOption{b2.Transport(rt)}
|
||||
|
||||
c, err := b2.NewClient(ctx, cfg.AccountID, cfg.Key, opts...)
|
||||
if err != nil {
|
||||
@@ -40,13 +41,13 @@ func newClient(ctx context.Context, cfg Config) (*b2.Client, error) {
|
||||
}
|
||||
|
||||
// Open opens a connection to the B2 service.
|
||||
func Open(cfg Config) (restic.Backend, error) {
|
||||
func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
debug.Log("cfg %#v", cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
client, err := newClient(ctx, cfg)
|
||||
client, err := newClient(ctx, cfg, rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -69,7 +70,8 @@ func Open(cfg Config) (restic.Backend, error) {
|
||||
Join: path.Join,
|
||||
Path: cfg.Prefix,
|
||||
},
|
||||
sem: sem,
|
||||
listMaxItems: defaultListMaxItems,
|
||||
sem: sem,
|
||||
}
|
||||
|
||||
return be, nil
|
||||
@@ -77,13 +79,13 @@ func Open(cfg Config) (restic.Backend, error) {
|
||||
|
||||
// Create opens a connection to the B2 service. If the bucket does not exist yet,
|
||||
// it is created.
|
||||
func Create(cfg Config) (restic.Backend, error) {
|
||||
func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
debug.Log("cfg %#v", cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
client, err := newClient(ctx, cfg)
|
||||
client, err := newClient(ctx, cfg, rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -109,7 +111,8 @@ func Create(cfg Config) (restic.Backend, error) {
|
||||
Join: path.Join,
|
||||
Path: cfg.Prefix,
|
||||
},
|
||||
sem: sem,
|
||||
listMaxItems: defaultListMaxItems,
|
||||
sem: sem,
|
||||
}
|
||||
|
||||
present, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
@@ -134,31 +137,6 @@ func (be *b2Backend) Location() string {
|
||||
return be.cfg.Bucket
|
||||
}
|
||||
|
||||
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
|
||||
type wrapReader struct {
|
||||
io.ReadCloser
|
||||
eofSeen bool
|
||||
f func()
|
||||
}
|
||||
|
||||
func (wr *wrapReader) Read(p []byte) (int, error) {
|
||||
if wr.eofSeen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n, err := wr.ReadCloser.Read(p)
|
||||
if err == io.EOF {
|
||||
wr.eofSeen = true
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (wr *wrapReader) Close() error {
|
||||
err := wr.ReadCloser.Close()
|
||||
wr.f()
|
||||
return err
|
||||
}
|
||||
|
||||
// IsNotExist returns true if the error is caused by a non-existing file.
|
||||
func (be *b2Backend) IsNotExist(err error) bool {
|
||||
return b2.IsNotExist(errors.Cause(err))
|
||||
@@ -189,14 +167,7 @@ func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offs
|
||||
|
||||
if offset == 0 && length == 0 {
|
||||
rd := obj.NewReader(ctx)
|
||||
wrapper := &wrapReader{
|
||||
ReadCloser: rd,
|
||||
f: func() {
|
||||
cancel()
|
||||
be.sem.ReleaseToken()
|
||||
},
|
||||
}
|
||||
return wrapper, nil
|
||||
return be.sem.ReleaseTokenOnClose(rd, cancel), nil
|
||||
}
|
||||
|
||||
// pass a negative length to NewRangeReader so that the remainder of the
|
||||
@@ -206,18 +177,11 @@ func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offs
|
||||
}
|
||||
|
||||
rd := obj.NewRangeReader(ctx, offset, int64(length))
|
||||
wrapper := &wrapReader{
|
||||
ReadCloser: rd,
|
||||
f: func() {
|
||||
cancel()
|
||||
be.sem.ReleaseToken()
|
||||
},
|
||||
}
|
||||
return wrapper, nil
|
||||
return be.sem.ReleaseTokenOnClose(rd, cancel), nil
|
||||
}
|
||||
|
||||
// Save stores data in the backend at the handle.
|
||||
func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
|
||||
func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
@@ -232,7 +196,7 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (e
|
||||
debug.Log("Save %v, name %v", h, name)
|
||||
obj := be.bucket.Object(name)
|
||||
|
||||
_, err = obj.Attrs(ctx)
|
||||
_, err := obj.Attrs(ctx)
|
||||
if err == nil {
|
||||
debug.Log(" %v already exists", h)
|
||||
return errors.New("key already exists")
|
||||
@@ -304,19 +268,20 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType) <-chan string
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
be.sem.GetToken()
|
||||
|
||||
go func() {
|
||||
defer close(ch)
|
||||
defer cancel()
|
||||
defer be.sem.ReleaseToken()
|
||||
|
||||
prefix := be.Dirname(restic.Handle{Type: t})
|
||||
cur := &b2.Cursor{Prefix: prefix}
|
||||
|
||||
for {
|
||||
be.sem.GetToken()
|
||||
objs, c, err := be.bucket.ListCurrentObjects(ctx, be.listMaxItems, cur)
|
||||
be.sem.ReleaseToken()
|
||||
if err != nil && err != io.EOF {
|
||||
// TODO: return err to caller once err handling in List() is improved
|
||||
debug.Log("List: %v", err)
|
||||
return
|
||||
}
|
||||
debug.Log("returned %v items", len(objs))
|
||||
|
@@ -7,14 +7,20 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/b2"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
. "github.com/restic/restic/internal/test"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newB2TestSuite(t testing.TB) *test.Suite {
|
||||
tr, err := backend.Transport(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
return &test.Suite{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
@@ -39,28 +45,24 @@ func newB2TestSuite(t testing.TB) *test.Suite {
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(b2.Config)
|
||||
return b2.Create(cfg)
|
||||
return b2.Create(cfg, tr)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(b2.Config)
|
||||
return b2.Open(cfg)
|
||||
return b2.Open(cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(config interface{}) error {
|
||||
cfg := config.(b2.Config)
|
||||
be, err := b2.Open(cfg)
|
||||
be, err := b2.Open(cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -83,7 +85,7 @@ func testVars(t testing.TB) {
|
||||
func TestBackendB2(t *testing.T) {
|
||||
defer func() {
|
||||
if t.Skipped() {
|
||||
SkipDisallowed(t, "restic/backend/b2.TestBackendB2")
|
||||
rtest.SkipDisallowed(t, "restic/backend/b2.TestBackendB2")
|
||||
}
|
||||
}()
|
||||
|
||||
|
84
internal/backend/backend_error.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// ErrorBackend is used to induce errors into various function calls and test
|
||||
// the retry functions.
|
||||
type ErrorBackend struct {
|
||||
FailSave float32
|
||||
FailSaveRead float32
|
||||
FailLoad float32
|
||||
FailStat float32
|
||||
restic.Backend
|
||||
|
||||
r *rand.Rand
|
||||
m sync.Mutex
|
||||
}
|
||||
|
||||
// statically ensure that ErrorBackend implements restic.Backend.
|
||||
var _ restic.Backend = &ErrorBackend{}
|
||||
|
||||
// NewErrorBackend wraps be with a backend that returns errors according to
|
||||
// given probabilities.
|
||||
func NewErrorBackend(be restic.Backend, seed int64) *ErrorBackend {
|
||||
return &ErrorBackend{
|
||||
Backend: be,
|
||||
r: rand.New(rand.NewSource(seed)),
|
||||
}
|
||||
}
|
||||
|
||||
func (be *ErrorBackend) fail(p float32) bool {
|
||||
be.m.Lock()
|
||||
v := be.r.Float32()
|
||||
be.m.Unlock()
|
||||
|
||||
return v < p
|
||||
}
|
||||
|
||||
// Save stores the data in the backend under the given handle.
|
||||
func (be *ErrorBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
|
||||
if be.fail(be.FailSave) {
|
||||
return errors.Errorf("Save(%v) random error induced", h)
|
||||
}
|
||||
|
||||
if be.fail(be.FailSaveRead) {
|
||||
_, err := io.CopyN(ioutil.Discard, rd, be.r.Int63n(1000))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.Errorf("Save(%v) random error with partial read induced", h)
|
||||
}
|
||||
|
||||
return be.Backend.Save(ctx, h, rd)
|
||||
}
|
||||
|
||||
// Load returns a reader that yields the contents of the file at h at the
|
||||
// given offset. If length is larger than zero, only a portion of the file
|
||||
// is returned. rd must be closed after use. If an error is returned, the
|
||||
// ReadCloser must be nil.
|
||||
func (be *ErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
|
||||
if be.fail(be.FailLoad) {
|
||||
return nil, errors.Errorf("Load(%v, %v, %v) random error induced", h, length, offset)
|
||||
}
|
||||
|
||||
return be.Backend.Load(ctx, h, length, offset)
|
||||
}
|
||||
|
||||
// Stat returns information about the File identified by h.
|
||||
func (be *ErrorBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
|
||||
if be.fail(be.FailStat) {
|
||||
return restic.FileInfo{}, errors.Errorf("Stat(%v) random error induced", h)
|
||||
}
|
||||
|
||||
return be.Backend.Stat(ctx, h)
|
||||
}
|
97
internal/backend/backend_retry.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// RetryBackend retries operations on the backend in case of an error with a
|
||||
// backoff.
|
||||
type RetryBackend struct {
|
||||
restic.Backend
|
||||
MaxTries int
|
||||
Report func(string, error, time.Duration)
|
||||
}
|
||||
|
||||
// statically ensure that RetryBackend implements restic.Backend.
|
||||
var _ restic.Backend = &RetryBackend{}
|
||||
|
||||
// NewRetryBackend wraps be with a backend that retries operations after a
|
||||
// backoff. report is called with a description and the error, if one occurred.
|
||||
func NewRetryBackend(be restic.Backend, maxTries int, report func(string, error, time.Duration)) *RetryBackend {
|
||||
return &RetryBackend{
|
||||
Backend: be,
|
||||
MaxTries: maxTries,
|
||||
Report: report,
|
||||
}
|
||||
}
|
||||
|
||||
func (be *RetryBackend) retry(msg string, f func() error) error {
|
||||
return backoff.RetryNotify(f,
|
||||
backoff.WithMaxTries(backoff.NewExponentialBackOff(), uint64(be.MaxTries)),
|
||||
func(err error, d time.Duration) {
|
||||
if be.Report != nil {
|
||||
be.Report(msg, err, d)
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Save stores the data in the backend under the given handle.
|
||||
func (be *RetryBackend) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
|
||||
seeker, ok := rd.(io.Seeker)
|
||||
if !ok {
|
||||
return errors.Errorf("reader %T is not a seeker", rd)
|
||||
}
|
||||
|
||||
pos, err := seeker.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Seek")
|
||||
}
|
||||
|
||||
if pos != 0 {
|
||||
return errors.Errorf("reader is not at the beginning (pos %v)", pos)
|
||||
}
|
||||
|
||||
return be.retry(fmt.Sprintf("Save(%v)", h), func() error {
|
||||
_, err := seeker.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Backend.Save(ctx, h, rd)
|
||||
})
|
||||
}
|
||||
|
||||
// Load returns a reader that yields the contents of the file at h at the
|
||||
// given offset. If length is larger than zero, only a portion of the file
|
||||
// is returned. rd must be closed after use. If an error is returned, the
|
||||
// ReadCloser must be nil.
|
||||
func (be *RetryBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (rd io.ReadCloser, err error) {
|
||||
err = be.retry(fmt.Sprintf("Load(%v, %v, %v)", h, length, offset),
|
||||
func() error {
|
||||
var innerError error
|
||||
rd, innerError = be.Backend.Load(ctx, h, length, offset)
|
||||
|
||||
return innerError
|
||||
})
|
||||
return rd, err
|
||||
}
|
||||
|
||||
// Stat returns information about the File identified by h.
|
||||
func (be *RetryBackend) Stat(ctx context.Context, h restic.Handle) (fi restic.FileInfo, err error) {
|
||||
err = be.retry(fmt.Sprintf("Stat(%v)", h),
|
||||
func() error {
|
||||
var innerError error
|
||||
fi, innerError = be.Backend.Stat(ctx, h)
|
||||
|
||||
return innerError
|
||||
})
|
||||
return fi, err
|
||||
}
|
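The two wrappers above are meant to be combined: ErrorBackend injects random
failures and RetryBackend retries them with a backoff. A minimal sketch of such
a test (hypothetical, not part of this change set; it reuses the mock.Backend
helper imported by the test file below):

package backend

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"testing"
	"time"

	"github.com/restic/restic/internal/mock"
	"github.com/restic/restic/internal/restic"
)

func TestRetryOverErrorBackend(t *testing.T) {
	// base backend that accepts and consumes every Save
	base := &mock.Backend{
		SaveFn: func(ctx context.Context, h restic.Handle, rd io.Reader) error {
			_, err := io.Copy(ioutil.Discard, rd)
			return err
		},
	}

	// wrap it so that roughly 30% of all Save calls fail
	flaky := NewErrorBackend(base, 23)
	flaky.FailSave = 0.3

	// retry each operation up to ten times, logging every retry
	be := NewRetryBackend(flaky, 10, func(msg string, err error, d time.Duration) {
		t.Logf("%v failed with %v, retrying after %v", msg, err, d)
	})

	data := []byte("some test data")
	if err := be.Save(context.TODO(), restic.Handle{}, bytes.NewReader(data)); err != nil {
		t.Fatal(err)
	}
}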
90
internal/backend/backend_retry_test.go
Normal file
@@ -0,0 +1,90 @@
package backend

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"testing"

	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/mock"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/test"
)

func TestBackendRetrySeeker(t *testing.T) {
	be := &mock.Backend{
		SaveFn: func(ctx context.Context, h restic.Handle, rd io.Reader) error {
			return nil
		},
	}

	retryBackend := RetryBackend{
		Backend: be,
	}

	data := test.Random(24, 23*14123)

	type wrapReader struct {
		io.Reader
	}

	var rd io.Reader
	rd = wrapReader{bytes.NewReader(data)}

	err := retryBackend.Save(context.TODO(), restic.Handle{}, rd)
	if err == nil {
		t.Fatal("did not get expected error for retry backend with non-seeker reader")
	}

	rd = bytes.NewReader(data)
	_, err = io.CopyN(ioutil.Discard, rd, 5)
	if err != nil {
		t.Fatal(err)
	}

	err = retryBackend.Save(context.TODO(), restic.Handle{}, rd)
	if err == nil {
		t.Fatal("did not get expected error for partial reader")
	}
}

func TestBackendSaveRetry(t *testing.T) {
	buf := bytes.NewBuffer(nil)
	errcount := 0
	be := &mock.Backend{
		SaveFn: func(ctx context.Context, h restic.Handle, rd io.Reader) error {
			if errcount == 0 {
				errcount++
				_, err := io.CopyN(ioutil.Discard, rd, 120)
				if err != nil {
					return err
				}

				return errors.New("injected error")
			}

			_, err := io.Copy(buf, rd)
			return err
		},
	}

	retryBackend := RetryBackend{
		Backend: be,
	}

	data := test.Random(23, 5*1024*1024+11241)
	err := retryBackend.Save(context.TODO(), restic.Handle{}, bytes.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}

	if len(data) != buf.Len() {
		t.Errorf("wrong number of bytes written: want %d, got %d", len(data), buf.Len())
	}

	if !bytes.Equal(data, buf.Bytes()) {
		t.Errorf("wrong data written to backend")
	}
}
@@ -8,8 +8,8 @@ import (
	"github.com/restic/restic/internal/options"
)

// Config contains all configuration necessary to connect to an gcs compatible
// server.
// Config contains all configuration necessary to connect to a Google Cloud
// Storage bucket.
type Config struct {
	ProjectID   string
	JSONKeyPath string
@@ -1,3 +1,4 @@
// Package gs provides a restic backend for Google Cloud Storage.
package gs

import (
@@ -20,7 +21,13 @@ import (
	storage "google.golang.org/api/storage/v1"
)

// Backend stores data on an gs endpoint.
// Backend stores data in a GCS bucket.
//
// The service account used to access the bucket must have these permissions:
//  * storage.objects.create
//  * storage.objects.delete
//  * storage.objects.get
//  * storage.objects.list
type Backend struct {
	service   *storage.Service
	projectID string
@@ -31,7 +38,7 @@ type Backend struct {
	backend.Layout
}

// make sure that *Backend implements backend.Backend
// Ensure that *Backend implements restic.Backend.
var _ restic.Backend = &Backend{}

func getStorageService(jsonKeyPath string) (*storage.Service, error) {
@@ -87,28 +94,63 @@ func open(cfg Config) (*Backend, error) {
	return be, nil
}

// Open opens the gs backend at bucket and region.
// Open opens the gs backend at the specified bucket.
func Open(cfg Config) (restic.Backend, error) {
	return open(cfg)
}

// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
// Create opens the gs backend at the specified bucket and attempts to creates
// the bucket if it does not exist yet.
//
// The service account must have the "storage.buckets.create" permission to
// create a bucket the does not yet exist.
func Create(cfg Config) (restic.Backend, error) {
	be, err := open(cfg)

	if err != nil {
		return nil, errors.Wrap(err, "open")
	}

	// Create bucket if not exists
	// Try to determine if the bucket exists. If it does not, try to create it.
	//
	// A Get call has three typical error cases:
	//
	// * nil: Bucket exists and we have access to the metadata (returned).
	//
	// * 403: Bucket exists and we do not have access to the metadata. We
	// don't have storage.buckets.get permission to the bucket, but we may
	// still be able to access objects in the bucket.
	//
	// * 404: Bucket doesn't exist.
	//
	// Determining if the bucket is accessible is best-effort because the
	// 403 case is ambiguous.
	if _, err := be.service.Buckets.Get(be.bucketName).Do(); err != nil {
		bucket := &storage.Bucket{
			Name: be.bucketName,
		gerr, ok := err.(*googleapi.Error)
		if !ok {
			// Don't know what to do with this error.
			return nil, errors.Wrap(err, "service.Buckets.Get")
		}

		if _, err := be.service.Buckets.Insert(be.projectID, bucket).Do(); err != nil {
			return nil, errors.Wrap(err, "service.Buckets.Insert")
		switch gerr.Code {
		case 403:
			// Bucket exists, but we don't know if it is
			// accessible. Optimistically assume it is; if not,
			// future Backend calls will fail.
			debug.Log("Unable to determine if bucket %s is accessible (err %v). Continuing as if it is.", be.bucketName, err)
		case 404:
			// Bucket doesn't exist, try to create it.
			bucket := &storage.Bucket{
				Name: be.bucketName,
			}

			if _, err := be.service.Buckets.Insert(be.projectID, bucket).Do(); err != nil {
				// Always an error, as the bucket definitely
				// doesn't exist.
				return nil, errors.Wrap(err, "service.Buckets.Insert")
			}
		default:
			// Don't know what to do with this error.
			return nil, errors.Wrap(err, "service.Buckets.Get")
		}
	}

@@ -162,20 +204,48 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err

	debug.Log("Save %v at %v", h, objName)

	be.sem.GetToken()

	// Check key does not already exist
	if _, err := be.service.Objects.Get(be.bucketName, objName).Do(); err == nil {
		debug.Log("%v already exists", h)
		be.sem.ReleaseToken()
		return errors.New("key already exists")
	}

	be.sem.GetToken()

	debug.Log("InsertObject(%v, %v)", be.bucketName, objName)

	// Set chunk size to zero to disable resumable uploads.
	//
	// With a non-zero chunk size (the default is
	// googleapi.DefaultUploadChunkSize, 8MB), Insert will buffer data from
	// rd in chunks of this size so it can upload these chunks in
	// individual requests.
	//
	// This chunking allows the library to automatically handle network
	// interruptions and re-upload only the last chunk rather than the full
	// file.
	//
	// Unfortunately, this buffering doesn't play nicely with
	// --limit-upload, which applies a rate limit to rd. This rate limit
	// ends up only limiting the read from rd into the buffer rather than
	// the network traffic itself. This results in poor network rate limit
	// behavior, where individual chunks are written to the network at full
	// bandwidth for several seconds, followed by several seconds of no
	// network traffic as the next chunk is read through the rate limiter.
	//
	// By disabling chunking, rd is passed further down the request stack,
	// where there is less (but some) buffering, which ultimately results
	// in better rate limiting behavior.
	//
	// restic typically writes small blobs (4MB-30MB), so the resumable
	// uploads are not providing significant benefit anyways.
	cs := googleapi.ChunkSize(0)

	info, err := be.service.Objects.Insert(be.bucketName,
		&storage.Object{
			Name: objName,
		}).Media(rd).Do()
		}).Media(rd, cs).Do()

	be.sem.ReleaseToken()

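A small sketch of the two upload call shapes this hunk switches between, assuming an existing *storage.Service (svc) and placeholder bucket/object names; this is illustrative, not code from the commit:

package example

import (
	"io"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func upload(svc *storage.Service, bucket, name string, rd io.Reader) error {
	// Default behaviour (what the old code did): Media buffers rd in 8 MB
	// chunks for resumable uploads, so a limiter wrapped around rd only slows
	// the copy into the buffer, not the network write itself.
	// _, err := svc.Objects.Insert(bucket, &storage.Object{Name: name}).Media(rd).Do()

	// With chunking disabled, rd feeds the request body more directly, so a
	// rate limiter on rd throttles actual network traffic.
	_, err := svc.Objects.Insert(bucket, &storage.Object{Name: name}).
		Media(rd, googleapi.ChunkSize(0)).
		Do()
	return err
}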
@@ -254,7 +324,10 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf

	objName := be.Filename(h)

	be.sem.GetToken()
	obj, err := be.service.Objects.Get(be.bucketName, objName).Do()
	be.sem.ReleaseToken()

	if err != nil {
		debug.Log("GetObject() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "service.Objects.Get")
@@ -267,7 +340,11 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
	found := false
	objName := be.Filename(h)

	be.sem.GetToken()
	_, err := be.service.Objects.Get(be.bucketName, objName).Do()
	be.sem.ReleaseToken()

	if err == nil {
		found = true
	}
@@ -279,7 +356,10 @@ func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
	objName := be.Filename(h)

	be.sem.GetToken()
	err := be.service.Objects.Delete(be.bucketName, objName).Do()
	be.sem.ReleaseToken()

	if er, ok := err.(*googleapi.Error); ok {
		if er.Code == 404 {
			err = nil
@@ -309,7 +389,10 @@ func (be *Backend) List(ctx context.Context, t restic.FileType) <-chan string {

	listReq := be.service.Objects.List(be.bucketName).Prefix(prefix).MaxResults(int64(be.listMaxItems))
	for {
		be.sem.GetToken()
		obj, err := listReq.Do()
		be.sem.ReleaseToken()

		if err != nil {
			fmt.Fprintf(os.Stderr, "error listing %v: %v\n", prefix, err)
			return
@@ -371,5 +454,5 @@ func (be *Backend) Delete(ctx context.Context) error {
	return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
}

// Close does nothing
// Close does nothing.
func (be *Backend) Close() error { return nil }

@@ -11,7 +11,7 @@ import (
	"github.com/restic/restic/internal/backend/test"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"
	. "github.com/restic/restic/internal/test"
	rtest "github.com/restic/restic/internal/test"
)

func newGSTestSuite(t testing.TB) *test.Suite {
@@ -69,11 +69,7 @@ func newGSTestSuite(t testing.TB) *test.Suite {
				return err
			}

			if err := be.(restic.Deleter).Delete(context.TODO()); err != nil {
				return err
			}

			return nil
			return be.Delete(context.TODO())
		},
	}
}
@@ -81,7 +77,7 @@ func newGSTestSuite(t testing.TB) *test.Suite {
func TestBackendGS(t *testing.T) {
	defer func() {
		if t.Skipped() {
			SkipDisallowed(t, "restic/backend/gs.TestBackendGS")
			rtest.SkipDisallowed(t, "restic/backend/gs.TestBackendGS")
		}
	}()

@@ -1,6 +1,10 @@
package backend

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"time"
@@ -8,8 +12,10 @@ import (
	"github.com/restic/restic/internal/debug"
)

// Transport returns a new http.RoundTripper with default settings applied.
func Transport() http.RoundTripper {
// Transport returns a new http.RoundTripper with default settings applied. If
// a custom rootCertFilename is non-empty, it must point to a valid PEM file,
// otherwise the function will return an error.
func Transport(rootCertFilenames []string) (http.RoundTripper, error) {
	// copied from net/http
	tr := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
@@ -25,6 +31,28 @@ func Transport() http.RoundTripper {
		ExpectContinueTimeout: 1 * time.Second,
	}

	if rootCertFilenames == nil {
		return debug.RoundTripper(tr), nil
	}

	p := x509.NewCertPool()
	for _, filename := range rootCertFilenames {
		if filename == "" {
			return nil, fmt.Errorf("empty filename for root certificate supplied")
		}
		b, err := ioutil.ReadFile(filename)
		if err != nil {
			return nil, fmt.Errorf("unable to read root certificate: %v", err)
		}
		if ok := p.AppendCertsFromPEM(b); !ok {
			return nil, fmt.Errorf("cannot parse root certificate from %q", filename)
		}
	}

	tr.TLSClientConfig = &tls.Config{
		RootCAs: p,
	}

	// wrap in the debug round tripper
	return debug.RoundTripper(tr)
	return debug.RoundTripper(tr), nil
}

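A sketch of how a caller uses the new Transport signature (not part of the diff; the certificate path is a made-up placeholder, and passing nil keeps the system trust store):

package example

import (
	"net/http"

	"github.com/restic/restic/internal/backend"
)

// newClient builds an HTTP client for a backend, optionally pinning custom
// root certificates, e.g. rootCerts = []string{"/path/to/ca.pem"} or nil.
func newClient(rootCerts []string) (*http.Client, error) {
	rt, err := backend.Transport(rootCerts)
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: rt}, nil
}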
@@ -9,11 +9,11 @@ import (
	"testing"

	"github.com/restic/restic/internal/restic"
	. "github.com/restic/restic/internal/test"
	rtest "github.com/restic/restic/internal/test"
)

func TestDefaultLayout(t *testing.T) {
	tempdir, cleanup := TempDir(t)
	tempdir, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -140,7 +140,7 @@ func TestDefaultLayout(t *testing.T) {
}

func TestRESTLayout(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -286,7 +286,7 @@ func TestRESTLayoutURLs(t *testing.T) {
}

func TestS3LegacyLayout(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -354,7 +354,7 @@ func TestS3LegacyLayout(t *testing.T) {
}

func TestDetectLayout(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -369,7 +369,7 @@ func TestDetectLayout(t *testing.T) {
	for _, test := range tests {
		for _, fs := range []Filesystem{fs, nil} {
			t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) {
				SetupTarTestFixture(t, path, filepath.Join("testdata", test.filename))
				rtest.SetupTarTestFixture(t, path, filepath.Join("testdata", test.filename))

				layout, err := DetectLayout(fs, filepath.Join(path, "repo"))
				if err != nil {
@@ -385,14 +385,14 @@ func TestDetectLayout(t *testing.T) {
					t.Fatalf("want layout %v, got %v", test.want, layoutName)
				}

				RemoveAll(t, filepath.Join(path, "repo"))
				rtest.RemoveAll(t, filepath.Join(path, "repo"))
			})
		}
	}
}

func TestParseLayout(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -405,7 +405,7 @@ func TestParseLayout(t *testing.T) {
		{"", "", "*backend.DefaultLayout"},
	}

	SetupTarTestFixture(t, path, filepath.Join("testdata", "repo-layout-default.tar.gz"))
	rtest.SetupTarTestFixture(t, path, filepath.Join("testdata", "repo-layout-default.tar.gz"))

	for _, test := range tests {
		t.Run(test.layoutName, func(t *testing.T) {
@@ -432,7 +432,7 @@ func TestParseLayout(t *testing.T) {
}

func TestParseLayoutInvalid(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var invalidNames = []string{

@@ -6,11 +6,11 @@ import (
	"testing"

	"github.com/restic/restic/internal/restic"
	. "github.com/restic/restic/internal/test"
	rtest "github.com/restic/restic/internal/test"
)

func TestLayout(t *testing.T) {
	path, cleanup := TempDir(t)
	path, cleanup := rtest.TempDir(t)
	defer cleanup()

	var tests = []struct {
@@ -33,7 +33,7 @@ func TestLayout(t *testing.T) {

	for _, test := range tests {
		t.Run(test.filename, func(t *testing.T) {
			SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))
			rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename))

			repo := filepath.Join(path, "repo")
			be, err := Open(Config{
@@ -75,7 +75,7 @@ func TestLayout(t *testing.T) {
				t.Errorf("Close() returned error %v", err)
			}

			RemoveAll(t, filepath.Join(path, "repo"))
			rtest.RemoveAll(t, filepath.Join(path, "repo"))
		})
	}
}

@@ -118,12 +118,22 @@ func (b *Local) IsNotExist(err error) bool {
}

// Save stores data in the backend at the handle.
func (b *Local) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
func (b *Local) Save(ctx context.Context, h restic.Handle, rd io.Reader) error {
	debug.Log("Save %v", h)
	if err := h.Valid(); err != nil {
		return err
	}

	if h.Type == restic.LockFile {
		lockDir := b.Dirname(h)
		if !dirExists(lockDir) {
			debug.Log("locks/ does not exist yet, creating now.")
			if err := fs.MkdirAll(lockDir, backend.Modes.Dir); err != nil {
				return errors.Wrap(err, "MkdirAll")
			}
		}
	}

	filename := b.Filename(h)

	// create new file
@@ -149,13 +159,7 @@ func (b *Local) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err er
		return errors.Wrap(err, "Close")
	}

	// set mode to read-only
	fi, err := fs.Stat(filename)
	if err != nil {
		return errors.Wrap(err, "Stat")
	}

	return setNewFileMode(filename, fi)
	return setNewFileMode(filename, backend.Modes.File)
}

// Load returns a reader that yields the contents of the file at h at the
@@ -248,30 +252,34 @@ func (b *Local) List(ctx context.Context, t restic.FileType) <-chan string {
	go func() {
		defer close(ch)

		fs.Walk(b.Basedir(t), func(path string, fi os.FileInfo, err error) error {
		err := fs.Walk(b.Basedir(t), func(path string, fi os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			if !isFile(fi) {
				return err
				return nil
			}

			select {
			case ch <- filepath.Base(path):
			case <-ctx.Done():
				return err
				return nil
			}

			return err
			return nil
		})

		if err != nil {
			debug.Log("Walk %v", err)
		}
	}()

	return ch
}

// Delete removes the repository and all files.
func (b *Local) Delete() error {
func (b *Local) Delete(ctx context.Context) error {
	debug.Log("Delete()")
	return fs.RemoveAll(b.Path)
}

@@ -9,14 +9,14 @@ import (
	"github.com/restic/restic/internal/backend/local"
	"github.com/restic/restic/internal/backend/test"
	"github.com/restic/restic/internal/restic"
	. "github.com/restic/restic/internal/test"
	rtest "github.com/restic/restic/internal/test"
)

func newTestSuite(t testing.TB) *test.Suite {
	return &test.Suite{
		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (interface{}, error) {
			dir, err := ioutil.TempDir(TestTempDir, "restic-test-local-")
			dir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-local-")
			if err != nil {
				t.Fatal(err)
			}
@@ -44,11 +44,11 @@ func newTestSuite(t testing.TB) *test.Suite {
		// CleanupFn removes data created during the tests.
		Cleanup: func(config interface{}) error {
			cfg := config.(local.Config)
			if !TestCleanupTempDirs {
			if !rtest.TestCleanupTempDirs {
				t.Logf("leaving test backend dir at %v", cfg.Path)
			}

			RemoveAll(t, cfg.Path)
			rtest.RemoveAll(t, cfg.Path)
			return nil
		},
	}
@@ -119,7 +119,7 @@ func removeAll(t testing.TB, dir string) {
}

func TestOpenNotExistingDirectory(t *testing.T) {
	dir, cleanup := TempDir(t)
	dir, cleanup := rtest.TempDir(t)
	defer cleanup()

	// local.Open must not create any files dirs in the repo

@@ -9,6 +9,6 @@ import (
)

// set file to readonly
func setNewFileMode(f string, fi os.FileInfo) error {
	return fs.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
func setNewFileMode(f string, mode os.FileMode) error {
	return fs.Chmod(f, mode)
}

@@ -7,6 +7,6 @@ import (
// We don't modify read-only on windows,
// since it will make us unable to delete the file,
// and this isn't common practice on this platform.
func setNewFileMode(f string, fi os.FileInfo) error {
func setNewFileMode(f string, mode os.FileMode) error {
	return nil
}

@@ -169,7 +169,7 @@ var parseTests = []struct {
		Config: s3.Config{
			Endpoint:    "eu-central-1",
			Bucket:      "bucketname",
			Prefix:      "restic",
			Prefix:      "",
			Connections: 5,
		},
	},
@@ -180,7 +180,7 @@ var parseTests = []struct {
		Config: s3.Config{
			Endpoint:    "hostname.foo",
			Bucket:      "bucketname",
			Prefix:      "restic",
			Prefix:      "",
			Connections: 5,
		},
	},
@@ -202,7 +202,7 @@ var parseTests = []struct {
		Config: s3.Config{
			Endpoint:    "eu-central-1",
			Bucket:      "repo",
			Prefix:      "restic",
			Prefix:      "",
			Connections: 5,
		},
	},
@@ -224,7 +224,7 @@ var parseTests = []struct {
		Config: s3.Config{
			Endpoint:    "hostname.foo",
			Bucket:      "repo",
			Prefix:      "restic",
			Prefix:      "",
			Connections: 5,
		},
	},
@@ -246,7 +246,7 @@ var parseTests = []struct {
		Config: s3.Config{
			Endpoint:    "hostname.foo",
			Bucket:      "repo",
			Prefix:      "restic",
			Prefix:      "",
			UseHTTP:     true,
			Connections: 5,
		},

@@ -31,8 +31,8 @@ type restBackend struct {
}

// Open opens the REST backend with the given config.
func Open(cfg Config) (restic.Backend, error) {
	client := &http.Client{Transport: backend.Transport()}
func Open(cfg Config, rt http.RoundTripper) (*restBackend, error) {
	client := &http.Client{Transport: rt}

	sem, err := backend.NewSemaphore(cfg.Connections)
	if err != nil {
@@ -56,8 +56,8 @@ func Open(cfg Config) (restic.Backend, error) {
}

// Create creates a new REST on server configured in config.
func Create(cfg Config) (restic.Backend, error) {
	be, err := Open(cfg)
func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
	be, err := Open(cfg, rt)
	if err != nil {
		return nil, err
	}
@@ -72,7 +72,7 @@ func Create(cfg Config) (restic.Backend, error) {
	values.Set("create", "true")
	url.RawQuery = values.Encode()

	resp, err := http.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
	resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader(""))
	if err != nil {
		return nil, err
	}
@@ -349,3 +349,38 @@ func (b *restBackend) Close() error {
	// same function.
	return nil
}

// Remove keys for a specified backend type.
func (b *restBackend) removeKeys(ctx context.Context, t restic.FileType) error {
	for key := range b.List(ctx, restic.DataFile) {
		err := b.Remove(ctx, restic.Handle{Type: restic.DataFile, Name: key})
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete removes all data in the backend.
func (b *restBackend) Delete(ctx context.Context) error {
	alltypes := []restic.FileType{
		restic.DataFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := b.removeKeys(ctx, t)
		if err != nil {
			return nil
		}
	}

	err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil && b.IsNotExist(err) {
		return nil
	}
	return err
}

@@ -10,10 +10,11 @@ import (
	"testing"
	"time"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/rest"
	"github.com/restic/restic/internal/backend/test"
	"github.com/restic/restic/internal/restic"
	. "github.com/restic/restic/internal/test"
	rtest "github.com/restic/restic/internal/test"
)

func runRESTServer(ctx context.Context, t testing.TB, dir string) func() {
@@ -61,10 +62,15 @@ func runRESTServer(ctx context.Context, t testing.TB, dir string) func() {
}

func newTestSuite(ctx context.Context, t testing.TB) *test.Suite {
	tr, err := backend.Transport(nil)
	if err != nil {
		t.Fatalf("cannot create transport for tests: %v", err)
	}

	return &test.Suite{
		// NewConfig returns a config for a new temporary backend that will be used in tests.
		NewConfig: func() (interface{}, error) {
			dir, err := ioutil.TempDir(TestTempDir, "restic-test-rest-")
			dir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-rest-")
			if err != nil {
				t.Fatal(err)
			}
@@ -84,13 +90,13 @@ func newTestSuite(ctx context.Context, t testing.TB) *test.Suite {
		// CreateFn is a function that creates a temporary repository for the tests.
		Create: func(config interface{}) (restic.Backend, error) {
			cfg := config.(rest.Config)
			return rest.Create(cfg)
			return rest.Create(cfg, tr)
		},

		// OpenFn is a function that opens a previously created temporary repository.
		Open: func(config interface{}) (restic.Backend, error) {
			cfg := config.(rest.Config)
			return rest.Open(cfg)
			return rest.Open(cfg, tr)
		},

		// CleanupFn removes data created during the tests.
@@ -103,14 +109,14 @@ func newTestSuite(ctx context.Context, t testing.TB) *test.Suite {
func TestBackendREST(t *testing.T) {
	defer func() {
		if t.Skipped() {
			SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
			rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir, cleanup := TempDir(t)
	dir, cleanup := rtest.TempDir(t)
	defer cleanup()

	cleanup = runRESTServer(ctx, t, dir)
@@ -123,7 +129,7 @@ func BenchmarkBackendREST(t *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir, cleanup := TempDir(t)
	dir, cleanup := rtest.TempDir(t)
	defer cleanup()

	cleanup = runRESTServer(ctx, t, dir)

@@ -34,11 +34,9 @@ func init() {
	options.Register("s3", Config{})
}

const defaultPrefix = "restic"

// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host:bucketname/prefix. The host can also be a valid s3 region
// s3:host/bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
func ParseConfig(s string) (interface{}, error) {
	switch {
@@ -71,15 +69,15 @@ func ParseConfig(s string) (interface{}, error) {
}

func createConfig(endpoint string, p []string, useHTTP bool) (interface{}, error) {
	var prefix string
	switch {
	case len(p) < 1:
	if len(p) < 1 {
		return nil, errors.New("s3: invalid format, host/region or bucket name not found")
	case len(p) == 1 || p[1] == "":
		prefix = defaultPrefix
	default:
	}

	var prefix string
	if len(p) > 1 && p[1] != "" {
		prefix = path.Clean(p[1])
	}

	cfg := NewConfig()
	cfg.Endpoint = endpoint
	cfg.UseHTTP = useHTTP

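A short illustration (not from the commit) of the new prefix handling in isolation, using a hypothetical helper whose input p mirrors the split repository path passed to createConfig:

package example

import "path"

// prefixFrom shows the behaviour of the new code: with the "restic" default
// removed, a missing or empty prefix stays empty, and a supplied one is
// normalized with path.Clean.
func prefixFrom(p []string) string {
	var prefix string
	if len(p) > 1 && p[1] != "" {
		prefix = path.Clean(p[1]) // "foo/bar/" -> "foo/bar"
	}
	return prefix // ["bucket"] or ["bucket", ""] -> ""
}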
Some files were not shown because too many files have changed in this diff.