Compare commits


354 Commits

Author SHA1 Message Date
Alexander Neumann
9da6e7c329 wip 2024-04-07 09:58:44 +02:00
Alexander Neumann
b3f38686ee wip 2024-03-03 14:19:22 +01:00
Alexander Neumann
bd3022c504 wip 2024-02-25 17:26:55 +01:00
Alexander Neumann
057b56a1b6 Rework for recent codebase 2024-02-25 11:01:59 +01:00
Alexander Weiss
1de9b82850 Add webdav using the fuse implementation 2024-02-17 20:45:47 +01:00
Michael Eischer
6fbb470835 Merge pull request #4665 from MichaelEischer/check-repair-packs
check: Suggest usage of `repair packs` if pack files are damaged
2024-02-17 15:54:07 +00:00
Michael Eischer
0a36d193d8 add changelog for enhanced repair packs 2024-02-12 21:43:35 +01:00
Michael Eischer
69304cd74f check: clarify repair pack usage 2024-02-12 21:43:35 +01:00
Michael Eischer
c3b0e6d004 Merge pull request #4700 from MichaelEischer/remove-duplicate-changelog-entries
remove changelogs that are already included in restic 0.16.4
2024-02-12 21:40:04 +01:00
Michael Eischer
9e3703ded5 remove changelogs that are already included in restic 0.16.4 2024-02-12 20:39:31 +01:00
Michael Eischer
527a3ff2b2 check: link to troubleshooting guide 2024-02-12 20:25:15 +01:00
Michael Eischer
ed4a4f8748 check: exclude inaccessible files from the repair pack suggestion 2024-02-12 20:25:15 +01:00
Michael Eischer
4073299a7c check: fix missing error if blob is invalid 2024-02-12 20:20:13 +01:00
Michael Eischer
6397615fbb check: document that check will show repair pack instructions 2024-02-12 20:20:13 +01:00
Michael Eischer
544fe38786 check: suggest repair pack for all damaged packs 2024-02-12 20:20:13 +01:00
Michael Eischer
772e3416d1 repair pack: drop feature flag 2024-02-12 20:20:12 +01:00
Michael Eischer
22a3cea1b3 checker: wrap all pack errors in ErrPackData 2024-02-12 20:19:32 +01:00
Michael Eischer
19bf2cf52d Merge pull request #4697 from MichaelEischer/report-blob-errors
backup: report files whose chunks failed to upload
2024-02-12 20:18:51 +01:00
Michael Eischer
5b5d506472 backup: report files whose chunks failed to upload 2024-02-11 22:43:26 +01:00
Michael Eischer
dde556e8e8 Merge pull request #4696 from MichaelEischer/fix-exclude-load-error-msg
backup: improve error message if exclude file cannot be loaded
2024-02-11 21:35:31 +00:00
Michael Eischer
ee1ff3c1d0 backup: improve error message if exclude file cannot be loaded 2024-02-11 22:26:13 +01:00
Michael Eischer
667a2f5369 Merge pull request #4694 from restic/update-go-versions
Update go versions
2024-02-11 17:10:12 +00:00
Michael Eischer
2ab18a92e6 CI: keep tests for Go 1.19 2024-02-10 23:42:34 +01:00
Alexander Neumann
c0514dd8ba Fix linter errors (except for tests) 2024-02-10 22:58:10 +01:00
Alexander Neumann
a8cda0119c Upgrade golangci-lint 2024-02-10 22:08:43 +01:00
Alexander Neumann
9720935c56 Update Go version for tests to 1.22 2024-02-10 21:56:01 +01:00
Michael Eischer
68cc327b15 Merge pull request #4692 from 27149chen/dump-to-existing-file
feat: dump flag --target should be allowed to write existing file
2024-02-10 17:44:54 +00:00
Michael Eischer
15d6fa1f83 dump: update docs for --target option 2024-02-10 18:39:06 +01:00
lou
80db02fc35 dump flag --target should be allowed to write existing file
Signed-off-by: lou <alex1988@outlook.com>
2024-02-10 18:39:06 +01:00
Michael Eischer
6a2b10e2a8 Merge pull request #4685 from konidev20/fix-gh-4676-sub-commands-for-key-management
Move key add, list, remove and passwd as separate sub-commands and improve key sub-command documentation
2024-02-08 19:59:16 +00:00
Michael Eischer
e46b21ab80 key: fix integration test for invalid arguments 2024-02-08 20:52:30 +01:00
Michael Eischer
eb389a2d25 Merge pull request #4687 from MichaelEischer/upgrade-zstd-library
Upgrade zstd library to latest version
2024-02-08 20:29:13 +01:00
Srigovind Nayak
795d33b3ee key: move add, list, remove, passwd to sub-commands
docs: improve the sub-command docs

changelog: add the unreleased changelog for the key command updates

key: update integration tests
2024-02-06 01:47:43 +05:30
Michael Eischer
0cffdb7493 Merge pull request #4682 from konidev20/feat-add-target-for-dump-command
feat: set --target for dump command
2024-02-05 19:16:42 +00:00
Michael Eischer
f5ffa40652 dump: minor cleanups 2024-02-05 20:10:52 +01:00
Srigovind Nayak
175c14b5c9 dump: add --target option 2024-02-05 20:10:52 +01:00
Michael Eischer
bca099ac7f Upgrade zstd library to latest version
The data corruption bug is fixed, thus remove the override.
2024-02-05 19:53:02 +01:00
Michael Eischer
0f09a8870c Merge pull request #4684 from konidev20/fix-gh-4658-update-azure-documentation
Update azure storage account authentication documentation
2024-02-04 20:13:18 +00:00
Srigovind Nayak
5771c4ecfb docs: update environment variables for az login to azure backend 2024-02-05 01:32:43 +05:30
Michael Eischer
b63bfd2257 Merge branch 'patch-release' 2024-02-04 20:21:42 +01:00
Alexander Neumann
0f9fa44de5 Set development version for 0.16.4 2024-02-04 19:50:56 +01:00
Alexander Neumann
3786536dc1 Add version for 0.16.4 2024-02-04 19:50:52 +01:00
Alexander Neumann
811be5984d Update manpages and auto-completion 2024-02-04 19:50:51 +01:00
Alexander Neumann
b0ead75de5 Generate CHANGELOG.md for 0.16.4 2024-02-04 19:50:34 +01:00
Alexander Neumann
6cd2804bff Prepare changelog for 0.16.4 2024-02-04 19:50:34 +01:00
Michael Eischer
a72c2b74f3 Apply changelog entry / documentation improvements from review 2024-02-04 19:10:06 +01:00
Michael Eischer
261b1455c7 add documentation for --no-extra-verify option 2024-02-04 19:10:06 +01:00
Michael Eischer
2a0bd2b637 rename --no-verify-pack to --no-extra-verify 2024-02-04 19:10:05 +01:00
Michael Eischer
4589da7eb9 add data verification changelog entry 2024-02-04 19:09:49 +01:00
Michael Eischer
75e72d826c pack: verify integrity of pack file header 2024-02-04 19:09:49 +01:00
Michael Eischer
d8916bc3d9 repository: ask users to report corrupted data while saving blobs 2024-02-04 19:09:49 +01:00
Michael Eischer
dc11d012bb Make --no-verify-pack globally available
Verifying all blobs before upload comes with a notable performance
impact. Allow users to skip it if necessary.
2024-02-04 19:09:49 +01:00
Michael Eischer
8ef5425351 repository: test verification of blobs/unpacked data 2024-02-04 19:09:46 +01:00
Michael Eischer
885431ec2b repository: Allow skipping verification for tests
Some tests have to explicitly create pack files with blobs that don't
match their ID. For those blobs the builtin verification of the
repository must be disabled.
2024-02-04 19:08:30 +01:00
Michael Eischer
cb85fb46dd backup: verify unpacked files before upload 2024-02-04 19:07:48 +01:00
Michael Eischer
2f30c940b2 backup: verify blobs before upload
This only covers the blobs themselves, the pack header is not verified
so far. Unpacked files are also not covered by the integrity check.
2024-02-04 19:07:48 +01:00
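For illustration: the verification added here boils down to re-checking, just before upload, that a blob's plaintext still hashes to the ID it will be stored under (restic derives blob IDs from the SHA-256 of the content). A minimal Go sketch of that idea only; the function name and signature are made up for this sketch and the real check is more involved:

```
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyBlob re-hashes the plaintext of a blob and compares the digest with
// the ID the blob is about to be stored under. A mismatch indicates that the
// data was corrupted somewhere between chunking and upload.
func verifyBlob(plaintext []byte, id [sha256.Size]byte) error {
	sum := sha256.Sum256(plaintext)
	if !bytes.Equal(sum[:], id[:]) {
		return fmt.Errorf("blob %x is corrupted, please report this as a bug", id)
	}
	return nil
}

func main() {
	data := []byte("example blob content")
	id := sha256.Sum256(data)
	if err := verifyBlob(data, id); err != nil {
		panic(err)
	}
	fmt.Println("blob verified before upload")
}
```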
Michael Eischer
0ea62b5ac6 repository: make repo.Options configurable for test repos 2024-02-04 19:07:46 +01:00
Michael Eischer
29e1caf825 add changelog draft for data corruption on max compression 2024-02-04 19:05:51 +01:00
Michael Eischer
0164f5310d Downgrade klauspost/compress to fix data corruption at max. compression 2024-02-04 19:05:50 +01:00
Michael Eischer
d5e662315a Merge pull request #4681 from MichaelEischer/verify-integrity-on-upload
backup: verify blobs before upload
2024-02-04 18:04:27 +00:00
Michael Eischer
effe76aaf5 Merge pull request #4679 from MichaelEischer/workaround-compression-bug
Downgrade klauspost/compress to fix data corruption at max. compression
2024-02-04 18:03:34 +00:00
Michael Eischer
5957417b1f Apply changelog entry / documentation improvements from review 2024-02-04 18:55:41 +01:00
Michael Eischer
219d8e3c18 add changelog draft for data corruption on max compression 2024-02-04 18:11:48 +01:00
Michael Eischer
a737fe1e47 add documentation for --no-extra-verify option 2024-02-04 17:11:49 +01:00
Michael Eischer
86b38a0b17 rename --no-verify-pack to --no-extra-verify 2024-02-04 17:01:05 +01:00
Michael Eischer
7d31180fe6 add data verification changelog entry 2024-02-04 15:48:11 +01:00
Michael Eischer
c32e5e2abb pack: verify integrity of pack file header 2024-02-04 15:31:42 +01:00
Michael Eischer
c97a271e89 repository: ask users to report corrupted data while saving blobs 2024-02-04 15:31:42 +01:00
Michael Eischer
66e8971659 Make --no-verify-pack globally available
Verifying all blobs before upload comes with a notable performance
impact. Allow users to skip it if necessary.
2024-02-04 15:31:42 +01:00
Michael Eischer
193140525c repository: test verification of blobs/unpacked data 2024-02-04 15:31:42 +01:00
Michael Eischer
96518d7c4a Merge pull request #4674 from restic/dependabot/go_modules/cloud.google.com/go/storage-1.37.0
build(deps): bump cloud.google.com/go/storage from 1.34.0 to 1.37.0
2024-02-03 17:23:49 +00:00
Michael Eischer
2dbb18128c repository: Allow skipping verification for tests
Some tests have to explicitly create pack files with blobs that don't
match their ID. For those blobs the builtin verification of the
repository must be disabled.
2024-02-03 18:22:47 +01:00
Michael Eischer
30a84e9003 backup: verify unpacked files before upload 2024-02-03 18:22:47 +01:00
Michael Eischer
c01a0c6da7 backup: verify blobs before upload
This only covers the blobs themselves, the pack header is not verified
so far. Unpacked files are also not covered by the integrity check.
2024-02-03 18:22:47 +01:00
Michael Eischer
16e3f79e8b repository: make repo.Options configurable for test repos 2024-02-03 18:22:47 +01:00
Michael Eischer
bb92b487f7 repository: fix repack test 2024-02-03 18:22:47 +01:00
dependabot[bot]
cf7cad11de build(deps): bump cloud.google.com/go/storage from 1.34.0 to 1.37.0
Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.34.0 to 1.37.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.34.0...spanner/v1.37.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-02 20:40:57 +00:00
Michael Eischer
370d9c31f4 Merge pull request #4671 from restic/dependabot/go_modules/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob-1.2.1
build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob from 1.2.0 to 1.2.1
2024-02-02 20:31:16 +00:00
Michael Eischer
6581133e85 Merge pull request #4675 from restic/dependabot/go_modules/golang.org/x/oauth2-0.16.0
build(deps): bump golang.org/x/oauth2 from 0.15.0 to 0.16.0
2024-02-02 20:29:39 +00:00
Michael Eischer
207a4a5e8e Downgrade klauspost/compress to fix data corruption at max. compression 2024-02-02 20:10:29 +01:00
Michael Eischer
cbf9cd4a7f Merge pull request #4670 from joram-berger/patch-1
Link to Go Match syntax directly in 040_backup.rst
2024-02-02 18:40:13 +00:00
dependabot[bot]
552f01662b build(deps): bump golang.org/x/oauth2 from 0.15.0 to 0.16.0
Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.15.0 to 0.16.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.15.0...v0.16.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-01 01:53:12 +00:00
dependabot[bot]
7f5ea511bc build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.2.0 to 1.2.1.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v1.2...sdk/azidentity/v1.2.1)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-01 01:51:32 +00:00
Joram Berger
b07afa9b02 Link to Go Match syntax directly in 040_backup.rst
The docs linked to filepath.Glob (which is used) but the syntax is described in the documentation of filepath.Match. So it makes sense to link that directly.
2024-01-31 23:40:21 +01:00
Michael Eischer
8b08b522c9 Merge pull request #4669 from MichaelEischer/fix-rewrite-typo
rewrite: fix typo in dry-run output
2024-01-31 22:08:12 +01:00
Michael Eischer
eaf9659efc Merge pull request #4657 from numerigraphe/fix-key-add-id
Properly report the ID of newly added keys
2024-01-31 20:59:41 +00:00
Michael Eischer
ba136ff60c rewrite: fix typo in dry-run output 2024-01-31 21:48:37 +01:00
Lionel Sausin
8fbe328371 Properly report the ID of newly added keys
Other commands like key list and key remove show the key's ID.

Showing the ID here lets users easily reuse the ID as a key hint for subsequent
commands.
In particular, a key hint is needed when the repository has many keys - otherwise
opening the repository may fail with "Fatal: maximum number of keys reached" even
when a proper password is provided.

Fixes #4656
2024-01-29 10:12:49 +01:00
Michael Eischer
4273e06a43 Merge pull request #4662 from MichaelEischer/clarify-backup-ignore-inode
backup: clarify that --ignore-inode also ignores ctime
2024-01-27 18:25:11 +01:00
Michael Eischer
248c144f72 Merge pull request #4663 from MichaelEischer/key-subcommand-bugfix
key: return an error if subcommand is unknown
2024-01-27 18:24:50 +01:00
Michael Eischer
765729d009 key: return an error if subcommand is unknown 2024-01-27 15:33:49 +01:00
Michael Eischer
a09d51d96c backup: clarify that --ignore-inode also ignores ctime 2024-01-27 13:42:29 +01:00
Michael Eischer
e44e4b00a6 Merge pull request #4550 from ndecker/ls-ncdu
Ls ncdu
2024-01-27 12:27:35 +00:00
Michael Eischer
10e71af759 describe ls command in docs 2024-01-27 13:22:00 +01:00
Michael Eischer
c90f24a06c Merge pull request #4641 from MichaelEischer/reduce-restic-repository-usage
Misc cleanups
2024-01-27 13:18:20 +01:00
Michael Eischer
d4ed7c8858 walker: add tests for leaveDir 2024-01-27 13:17:33 +01:00
Michael Eischer
2c80cfa4a5 walker: fix missing leaveDir if directory is partially skipped 2024-01-27 13:17:33 +01:00
Michael Eischer
261737abc8 ls: only allow either --json or --ncdu 2024-01-27 13:17:33 +01:00
Michael Eischer
a2f2f8fb4c fix linter warning 2024-01-27 13:17:33 +01:00
Michael Eischer
4bae54d040 ls: test ncdu output format 2024-01-27 13:17:33 +01:00
Michael Eischer
509b339d54 ls: correctly handle setuid/setgid/sticky bit in ncdu output 2024-01-27 13:17:33 +01:00
Michael Eischer
a2fe337610 ls: unify printer implementations 2024-01-27 13:17:33 +01:00
Michael Eischer
1b008c92d3 ls: rework ncdu output to use walker.LeaveDir 2024-01-27 13:17:33 +01:00
Michael Eischer
9ecbda059c walker: add callback to inform about leaving a directory 2024-01-27 13:17:32 +01:00
Nils Decker
b2703a4089 add changelog for ls --ncdu 2024-01-27 13:17:15 +01:00
Nils Decker
a9310948cf command ls: add option for ncdu output
NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories.
It has an option to save a directory tree and analyse it later.
This patch adds an output option to the ls command.

A snapshot can be seen with

`restic ls latest --ncdu | ncdu -f -`

- https://dev.yorhel.nl/ncdu
2024-01-27 13:06:52 +01:00
Michael Eischer
246559e654 check: cleanup s3 legacy detection 2024-01-27 13:02:04 +01:00
Michael Eischer
1dfd854769 lock: switch to repository.List 2024-01-27 13:02:04 +01:00
Michael Eischer
bfb56b78e1 replace some usages of restic.Repository with more specific interface
This should eventually make it easier to test the code.
2024-01-27 13:02:02 +01:00
Michael Eischer
3424088274 Merge pull request #4644 from MichaelEischer/refactor-repair-packs
Refactor and test `repair packs`
2024-01-27 13:00:51 +01:00
Michael Eischer
724ec179e3 Merge pull request #4648 from MichaelEischer/repository-removekey
repository: Introduce RemoveKey function
2024-01-27 13:00:42 +01:00
Michael Eischer
f0e1ad2285 fix linter warning 2024-01-27 12:51:45 +01:00
Michael Eischer
fd579421dd repository: deduplicate test 2024-01-27 12:51:45 +01:00
Michael Eischer
42c9318b9c repair pack: add tests 2024-01-27 12:51:45 +01:00
Michael Eischer
764b0bacd6 repair pack: add support for truncated files 2024-01-27 12:51:45 +01:00
Michael Eischer
7c351bc53c repair pack: reenable auto index updates
The method is not available on the restic.Repository interface that is
used for testing. Drop the call, as a small number of additional index
writes is not a problem.
2024-01-27 12:51:45 +01:00
Michael Eischer
feeab84204 repair pack: extract the repair logic into the repository package
Currently, the cmd/restic package contains a significant amount of code
that modifies repository internals. This code should in the mid-term
move into the repository package.
2024-01-27 12:51:45 +01:00
Michael Eischer
d7a50fe739 properly show termstatus progress bar if visible less than one frame
If a progress bar using termstatus was only visible for less than one
frame, then its output could be lost.
2024-01-27 12:51:40 +01:00
Michael Eischer
6b65a495b1 backup/restore: fix termstatus initialization
The termstatus must only be canceled once the command has returned.
Otherwise output may be lost when the context gets canceled.
2024-01-27 12:51:08 +01:00
Michael Eischer
d26d2d41f8 backup/restore: extract termstatus initialization 2024-01-27 12:51:08 +01:00
Michael Eischer
cb50832d50 index: let MasterIndex.Save also delete obsolete indexes 2024-01-27 12:51:08 +01:00
Michael Eischer
bedff1ed6d split deleteFiles into UI and logic parts 2024-01-27 12:51:08 +01:00
Michael Eischer
c13bf0b607 repository: Introduce RemoveKey function
This replaces directly removing keys via the backend.
2024-01-27 12:42:58 +01:00
Michael Eischer
25ac1549e7 Merge pull request #4661 from MichaelEischer/clarify-contributing
CONTRIBUTING.md: Clarify handling of small bugfixes
2024-01-26 23:04:58 +00:00
Michael Eischer
ae9683336d CONTRIBUTING.md: Clarify handling of small bugfixes
Opening an issue for a small bugfix is usually not useful. It primarily
adds overhead.
2024-01-26 23:51:54 +01:00
Michael Eischer
446167ae80 Merge pull request #4643 from MichaelEischer/remove-redundant-poly1305-mask
Remove redundant poly1305 key masking
2024-01-23 19:46:40 +01:00
Michael Eischer
5b36c4eb5f Merge pull request #4647 from MichaelEischer/reduce-globals
Remove all usages of the global command-specific options
2024-01-23 19:46:15 +01:00
Michael Eischer
1419baf67a Merge pull request #4645 from MichaelEischer/improve-lock-checking
lock: checkForOtherLocks processes each lock at most once
2024-01-23 19:46:00 +01:00
Michael Eischer
66103aea3d Remove all usages of the global command-specific options
Now, every command uses an options struct, which is passed to the run*
function by the command.RunE method.
2024-01-23 19:21:39 +01:00
Michael Eischer
79f2939eb9 Merge pull request #4654 from adrian5/docfix
docs: fix formatting
2024-01-23 18:08:59 +00:00
Michael Eischer
0e2ee06803 Merge pull request #4650 from MichaelEischer/improve-stdin-from-command-description
backup: Improve help text for `--stdin-from-command`
2024-01-23 19:22:15 +01:00
Michael Eischer
2927982256 Merge pull request #4649 from MichaelEischer/simplify-termstatus-shutdown
ui/termstatus: simplify cleaning up on termination
2024-01-23 19:16:21 +01:00
Michael Eischer
6cc2bec5dd apply suggestion from review 2024-01-23 19:09:04 +01:00
Michael Eischer
18806944f6 doc: remove blockquotes from unordered lists 2024-01-23 19:03:54 +01:00
adrian5
609f84e095 docs: fix formatting 2024-01-22 21:12:12 +01:00
Michael Eischer
767c2539a0 backup: Improve help text for --stdin-from-command 2024-01-21 22:06:54 +01:00
Michael Eischer
6bdca13603 ui/termstatus: simplify cleaning up on termination
`writeStatus` also cleans no longer used status lines.
The old code actually cleaned one line too many. However, as that line
was never used, it makes no difference.
2024-01-21 21:27:27 +01:00
Michael Eischer
f1f34eb3e5 lock: checkForOtherLocks processes each lock at most once
If a lock could not be loaded, then restic would check all lock files
again. These repeated checks are not useful as the status of a lock file
cannot change unless its ID changes too. Thus, skip already checked lock
files on retries.
2024-01-20 22:40:12 +01:00
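A minimal Go sketch of the retry behaviour described above: lock IDs that were validated once are remembered and skipped on later attempts. Names and signatures are illustrative assumptions, not restic's real API:

```
package main

import (
	"errors"
	"fmt"
)

// checkLocks validates each lock file at most once across retries: a lock
// whose ID was already checked successfully is skipped, since its content
// cannot change without its ID changing as well.
func checkLocks(lockIDs []string, load func(id string) error, retries int) error {
	checked := map[string]struct{}{}
	var lastErr error
	for attempt := 0; attempt <= retries; attempt++ {
		lastErr = nil
		for _, id := range lockIDs {
			if _, ok := checked[id]; ok {
				continue // already validated in a previous attempt
			}
			if err := load(id); err != nil {
				lastErr = err
				continue
			}
			checked[id] = struct{}{}
		}
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}

func main() {
	flaky := true
	load := func(id string) error {
		if id == "b" && flaky {
			flaky = false
			return errors.New("temporary load failure")
		}
		fmt.Println("checked lock", id)
		return nil
	}
	fmt.Println(checkLocks([]string{"a", "b", "c"}, load, 2))
}
```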
Michael Eischer
fee83e1c09 Remove redundant poly1305 key masking
The implementation in crypto/poly1305 already performs the exact same
masking.
2024-01-20 12:36:59 +01:00
Michael Eischer
6696195f38 Merge pull request #4584 from elkemper/fix-stop-archiving-metadata
S3: Don't archive metadata files on S3 Glacier
2024-01-20 10:30:37 +00:00
Michael Eischer
a763a5c67d s3: minor cleanups for archive storage class handling 2024-01-20 11:25:28 +01:00
Vladislav Belous
8ca58b487c S3: do not set storage class for metadata when using archive storage 2024-01-20 11:04:15 +01:00
Michael Eischer
62111f4379 Merge pull request #4625 from MichaelEischer/refactor-streampacks
Refactor repository.StreamPacks
2024-01-19 21:48:37 +01:00
Michael Eischer
2c310a526e repository: Replace StreamPack function with LoadBlobsFromPack method
LoadBlobsFromPack is now part of the repository struct. This ensures
that users of that method don't have to deal with internals of the
repository implementation.

The filerestorer tests now also contain far fewer pack file
implementation details.
2024-01-19 21:40:43 +01:00
Michael Eischer
6b7b5c89e9 repository: prepare StreamPack refactor 2024-01-19 21:40:43 +01:00
Michael Eischer
22d0c3f8dc check: Use PackBlobIterator instead of StreamPack
To stream the content of a pack file only once, the check command used StreamPack
with a custom pack load function. That combination was always brittle
and complicated using StreamPack everywhere else. Now that StreamPack
internally uses PackBlobIterator, use that primitive instead, which is a
much better fit for what the check command requires.
2024-01-19 21:40:36 +01:00
Michael Eischer
fb422497af repository: split StreamPack implementation
Move the actual decoding of the pack data into a separate iterator.
2024-01-19 21:39:55 +01:00
Michael Eischer
54c5c72e5a Merge pull request #4616 from MichaelEischer/fix-rest-connection-close
rest: fix and cleanup closing of http response body
2024-01-19 21:31:35 +01:00
Michael Eischer
5f49eec655 Merge pull request #4615 from MichaelEischer/fix-find-empty-dirs
walker: Remove ignoreTrees functionality
2024-01-19 21:25:40 +01:00
Michael Eischer
ec13105093 Merge pull request #4623 from MichaelEischer/docs-verify-release-binaries
Add documentation for the verify-release-binaries.sh script
2024-01-19 21:17:38 +01:00
Michael Eischer
bd883caae1 CI: enable bodyclose linter 2024-01-19 21:17:18 +01:00
Michael Eischer
b1a8fd1d03 rest: fix and cleanup closing of http response body
If client.Do returns an error, then there's no body that has to be
closed. For requests for which we are not interested in the response
body, immediately drain and close the body to make sure it isn't
forgotten later on.

This change in particular adds the missing `Close()` call for the
`List()` command.
2024-01-19 21:17:17 +01:00
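The underlying pattern here is standard net/http hygiene: only close the body when a response was actually returned, and drain bodies whose content is not needed so the connection can be reused. A hedged sketch of that general pattern, not the REST backend's actual code:

```
package main

import (
	"fmt"
	"io"
	"net/http"
)

// drainAndClose discards any remaining response data and closes the body, so
// the underlying connection can be reused. Responses whose body is never read
// must still be closed, otherwise the connection leaks.
func drainAndClose(resp *http.Response) error {
	_, err := io.Copy(io.Discard, resp.Body)
	cerr := resp.Body.Close()
	if err != nil {
		return err
	}
	return cerr
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		// client.Do/Get returned an error: there is no body to close
		fmt.Println("request failed:", err)
		return
	}
	if err := drainAndClose(resp); err != nil {
		fmt.Println("closing response failed:", err)
	}
}
```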
Michael Eischer
fdcbb53017 walker: test skipping for root node 2024-01-19 21:16:06 +01:00
Michael Eischer
0b39940fdb walker: Remove ignoreTrees functionality
It was only used in two places:
- stats: apparently as a minor performance optimization, which is
  unlikely to be important
- find: filtered directories would be ignored. However, this
  optimization missed that it is possible that two directories have the
  exact same content. Such directories would be incorrectly ignored too.
  Example:
```
mkdir test test/a test/b
restic backup test
restic find latest test/b
-> incorrectly does not return anything
```

Thus, remove the functionality as it's apparently too complex to use
correctly.
2024-01-19 21:16:06 +01:00
Michael Eischer
147b0e54cb Merge pull request #4639 from northben/patch-1
Update Backblaze documentation
2024-01-19 20:08:48 +00:00
Ben Northway
5413877d33 Update Backblaze documentation
clarify documentation regarding B2 bucket lifecycle settings. The default lifecycle setting is probably fine for most users now; a custom policy is not necessary.
2024-01-18 16:41:01 -06:00
Michael Eischer
03e06d0797 Merge branch 'patch-release' 2024-01-14 21:38:17 +01:00
Alexander Neumann
0ec9383ba2 Set development version for 0.16.3 2024-01-14 20:21:45 +01:00
Alexander Neumann
abca112404 Add version for 0.16.3 2024-01-14 20:21:45 +01:00
Alexander Neumann
b70b94507a Generate CHANGELOG.md for 0.16.3 2024-01-14 20:21:19 +01:00
Alexander Neumann
d987582594 Prepare changelog for 0.16.3 2024-01-14 20:21:19 +01:00
Leo R. Lundgren
ef2e473b99 doc: Polish changelogs 2024-01-10 00:19:07 +01:00
Michael Eischer
7b2de84763 Merge pull request #4618 from MichaelEischer/workaround-rclone-list-errors
rclone: Workaround for incorrect "not found" errors while listing files
2024-01-09 18:28:31 +01:00
Michael Eischer
e4bbde7036 rclone: Workaround for incorrect "not found" errors while listing files
rclone returns a "not found" error if an internal error occurs while
listing a folder. Ignoring this error lets restic erroneously think that
there are no files, which can cause `prune` to wipe the whole
repository.
2024-01-09 18:28:17 +01:00
Michael Eischer
ec0fb46f6c add changelog for reliable restores 2024-01-09 18:27:48 +01:00
Michael Eischer
103beb96bc restore: separately restore blobs that are frequently referenced
Writing these blobs to their files can take a long time and consequently
cause the backend connection to time out. Avoid that by retrieving these
blobs separately.
2024-01-09 18:27:48 +01:00
Michael Eischer
f0f89d7f27 restore: split error reporting from downloadPack 2024-01-09 18:27:48 +01:00
Michael Eischer
cf352ccafb restore: cleanup downloadPack 2024-01-09 18:27:48 +01:00
Michael Eischer
b856e9489a restore: split downloadPack into smaller methods 2024-01-09 18:27:48 +01:00
Michael Eischer
c31e9418ba Merge pull request #4626 from MichaelEischer/reliable-large-restores
Improve reliability of large restores
2024-01-09 18:23:09 +01:00
Michael Eischer
2e8de9edfd rclone: Workaround for incorrect "not found" errors while listing files
rclone returns a "not found" error if an internal error occurs while
listing a folder. Ignoring this error lets restic erroneously think that
there are no files, which can cause `prune` to wipe the whole
repository.
2024-01-09 18:20:16 +01:00
Michael Eischer
ce7db90e08 sync CI and go dependencies with master branch 2024-01-08 21:33:05 +01:00
Michael Eischer
620518aec6 add changelog for better restore error reporting 2024-01-08 21:33:05 +01:00
Michael Eischer
f2fafbffaa restore: only report errors for blobs that actually failed to load
Previously, errors would be reported for all blobs of a packfile that
failed to stream. Now, only the blobs that have not yet been processed are reported.
2024-01-08 21:33:05 +01:00
Michael Eischer
7a3a884874 repository: test that StreamPack only delivers blobs once 2024-01-08 21:33:05 +01:00
Michael Eischer
772a907533 repository: StreamPack delivers blobs at most once
If an error occurred while streaming a pack file, this could result in
passing some of the blobs multiple times to the callback function. This
significantly complicates using StreamPack correctly and is unnecessary.
Retries do not change the content of a blob and thus only deliver the
same result over and over again.
2024-01-08 21:33:05 +01:00
Michael Eischer
a9446c1184 add changelog for irregular files on windows 2024-01-08 21:33:05 +01:00
Michael Eischer
1bab29c336 archiver: Add filepath to error message if it is not included yet 2024-01-08 21:33:05 +01:00
Michael Eischer
e886c3f6b2 archiver: improve error message for irregular files
Since Go 1.21, most reparse points are considered irregular files.
Depending on the underlying driver these can exhibit nearly arbitrary
behavior. When encountering such a file, restic returned an
indecipherable error message: `error: invalid node type ""`.

Add the filepath to the error message and state that the file type is
not supported.
2024-01-08 21:33:05 +01:00
Michael Eischer
c95de54726 restic: cleanup node type determination
os.ModeCharDevice is already included in os.ModeType
2024-01-08 21:33:05 +01:00
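Taken together, the two commits above amount to mapping fi.Mode() & os.ModeType to a node type and reporting unsupported irregular files together with their path. A rough Go sketch of that logic; the function name and the node-type strings are assumptions, not restic's real implementation:

```
package main

import (
	"fmt"
	"os"
)

// nodeTypeFromFileInfo maps a file's mode to a node type and reports an
// understandable error, including the path, for unsupported irregular files
// (e.g. many Windows reparse points since Go 1.21).
func nodeTypeFromFileInfo(path string, fi os.FileInfo) (string, error) {
	switch fi.Mode() & os.ModeType {
	case 0:
		return "file", nil
	case os.ModeDir:
		return "dir", nil
	case os.ModeSymlink:
		return "symlink", nil
	case os.ModeIrregular:
		return "", fmt.Errorf("%q: file type %q not supported", path, "irregular")
	default:
		return "", fmt.Errorf("%q: unsupported file mode %v", path, fi.Mode())
	}
}

func main() {
	fi, err := os.Lstat(".")
	if err != nil {
		panic(err)
	}
	typ, err := nodeTypeFromFileInfo(".", fi)
	fmt.Println(typ, err)
}
```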
Michael Eischer
d4b8abd3e2 fix deduplicated files on windows 2024-01-08 21:33:05 +01:00
Joram Berger
948ab3ccaf Add a note that the oldest snapshot may be kept additionally
Documentation enhancement.
2024-01-08 21:33:05 +01:00
Markus Zoppelt
bb0c923298 docs: add pkgx install option
PR in pkgx pantry: https://github.com/pkgxdev/pantry/pull/4098

restic pkg:
https://pkgx.dev/pkgs/restic.net/restic/
2024-01-08 21:33:05 +01:00
Michael Eischer
ff0c975443 regenerate changelog 2024-01-08 21:33:05 +01:00
Michael Eischer
7e61e117d6 cleanup changelog whitespace 2024-01-08 21:33:05 +01:00
mmattel
220a28582e Add a table of contents (TOC) to the changelog template 2024-01-08 21:33:05 +01:00
Giuseppe D'Andrea
f44fd73230 docs: fix typo in working with repos
When using the `copy` command, `--from-password-file` and `--from-password-command` flags are used to specify the password of the source repository, not of the destination repository.
2024-01-08 21:33:05 +01:00
Joda Stößer
76bd975e03 docs(scripting): correct stats output comment to be about the correct command
not about the snapshots command
2024-01-08 21:33:05 +01:00
Quang-Linh LE
64b7aed362 docs: Mention progress for restore command. This is available after https://github.com/restic/restic/pull/3991 2024-01-08 21:33:05 +01:00
Michael Kuhn
3fa6b2de4a Fix repository not being printed when using repository file
When using `RESTIC_REPOSITORY_FILE` in combination with `restic init`,
the repository is missing in the output:
```
$ restic init
created restic repository 3c872be20f at
[...]
```
This is due to the code using `gopts.Repo`, which is empty in this case.
2024-01-08 21:33:05 +01:00
Michael Eischer
5cd000f4b0 CI: update golangci-lint
Necessary to properly support Go 1.21.
2024-01-08 21:33:05 +01:00
Michael Eischer
4ea3796455 add changelog for reliable restores 2024-01-08 21:03:10 +01:00
Michael Eischer
e78be75d1e restore: separately restore blobs that are frequently referenced
Writing these blobs to their files can take a long time and consequently
cause the backend connection to time out. Avoid that by retrieving these
blobs separately.
2024-01-08 21:00:13 +01:00
Michael Eischer
2267910418 restore: split error reporting from downloadPack 2024-01-08 20:57:00 +01:00
Michael Eischer
00d18b7a88 restore: cleanup downloadPack 2024-01-08 20:53:08 +01:00
Michael Eischer
9328f34d43 restore: split downloadPack into smaller methods 2024-01-08 20:52:36 +01:00
Michael Eischer
77434c6e2b Merge pull request #4474 from ekristen/aws-assume-role
Allow AWS Assume Role
2024-01-08 19:07:17 +00:00
Michael Eischer
4248c6c3ca s3: update documentation 2024-01-07 19:30:11 +01:00
Michael Eischer
e4a7eb09ef Merge pull request #4624 from MichaelEischer/better-restorer-error-reporting
Improve restorer error reporting
2024-01-07 11:20:29 +01:00
Michael Eischer
f8b4e932ef Merge pull request #4620 from MichaelEischer/improve-irregular-file-handling
Improve irregular file handling
2024-01-07 11:12:07 +01:00
Michael Eischer
100872308f add changelog for better restore error reporting 2024-01-07 11:06:42 +01:00
Michael Eischer
dac3508170 restore: only report errors for blobs that actually failed to load
Previously, errors would be reported for all blobs of a packfile that
failed to stream. Now, only the blobs that have not yet been processed are reported.
2024-01-07 10:54:56 +01:00
Michael Eischer
77b1c52673 repository: test that StreamPack only delivers blobs once 2024-01-07 10:54:53 +01:00
Michael Eischer
fe5c337ca2 repository: StreamPack delivers blobs at most once
If an error occurred while streaming a pack file, this could result in
passing some of the blobs multiple times to the callback function. This
significantly complicates using StreamPack correctly and is unnecessary.
Retries do not change the content of a blob and thus only deliver the
same result over and over again.
2024-01-07 10:54:49 +01:00
Michael Eischer
3e29f8dddf add changelog for irregular files on windows 2024-01-07 10:52:12 +01:00
Michael Eischer
76f507c775 Merge pull request #4621 from MichaelEischer/fix-windows-dedup-files
Fix backup of deduplicated files on windows
2024-01-07 10:44:50 +01:00
Michael Eischer
6ef23b401b fix deduplicated files on windows 2024-01-07 10:23:31 +01:00
Michael Eischer
62f99a3b2f Add documentation for the verify-release-binaries.sh script 2024-01-07 10:16:30 +01:00
Michael Eischer
0360e540af Merge pull request #4622 from MichaelEischer/fix-outdated-windows-import
termstatus: update import path of golang.org/x/term
2024-01-06 23:24:16 +01:00
Michael Eischer
e6dfefba13 termstatus: update import path of golang.org/x/term 2024-01-06 21:59:26 +01:00
Michael Eischer
02bc73f5eb s3: minor code cleanups 2024-01-06 21:44:53 +01:00
Michael Eischer
20cf4777cb s3: check for EnvAWS credentials before Static credentials
EnvAWS considers more environment variables, including AWS_SESSION_TOKEN
and thus should be checked first.
2024-01-06 21:43:47 +01:00
Erik Kristensen
5ffb536aae feat: support AWS assume role 2024-01-06 21:19:58 +01:00
Michael Eischer
1604922360 Merge pull request #4527 from adamantike/cmd/copy/prefix-hostname-to-snapshot-paths
cmd: Add hostname to snapshot display output
2024-01-06 19:32:14 +00:00
Michael Eischer
c7844530d8 update docs 2024-01-06 20:25:24 +01:00
Michael Eischer
33b7c84a7a deduplicate string formatting of snapshot metadata
This removes the spurious ")" bracket at the end and normalizes the
metadata format used by the `ls` command.
2024-01-06 20:20:51 +01:00
Michael Manganiello
045aa64558 cmd/copy: Prefix hostname to snapshot display output
This change better resembles the output generated by `Snapshot.String()`,
which includes both username and hostname.

Closes #4506

Before:

```
$ restic copy --from-repo /srv/restic-repo
repository 3666882b opened (version 2, compression level auto)
repository 0085c387 opened (version 2, compression level auto)
created new cache in /home/mike/.cache/restic
[0:00] 100.00%  1 / 1 index files loaded
[0:00]          0 index files loaded

snapshot 32b39a20 of [/home/mike/data] at 2023-10-21 16:01:13.979948154 -0300 -03)
  copy started, this may take a while...
[0:00] 100.00%  1 / 1 packs copied
snapshot 10331fdd saved
```

After:

```
$ restic copy --from-repo /srv/restic-repo
repository 3666882b opened (version 2, compression level auto)
repository 0085c387 opened (version 2, compression level auto)
[0:00] 100.00%  1 / 1 index files loaded
[0:00]          0 index files loaded

snapshot 32b39a20 of [/home/mike/data] at 2023-10-21 16:01:13.979948154 -0300 -03 by mike@desktop)
  copy started, this may take a while...
[0:00] 100.00%  1 / 1 packs copied
snapshot a67bd1ee saved
```
2024-01-06 20:20:46 +01:00
Michael Eischer
b2b7669ca0 Merge pull request #4526 from dnnr/detect-bitrot-in-diff
Add bitrot detection to "diff" command
2024-01-06 19:18:34 +00:00
Michael Eischer
4f6b1bb6f6 diff: document limitations regarding metadata 2024-01-06 20:12:47 +01:00
Michael Eischer
3549635243 diff: copy nodes before modifying them for bitrot detection 2024-01-06 20:12:47 +01:00
Daniel Danner
a7dc18e697 Add bitrot detection to "diff" command
This introduces a new modifier to the output of the diff command. It
appears whenever two files being compared only differ in their content
but not in their metadata. As far as we know, under normal
circumstances, this should only ever happen if some kind of bitrot has
happened in the source file. The prerequisite for this detection to work
is that the right-side snapshot of the comparison has been created with
"backup --force".
2024-01-06 20:12:47 +01:00
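A toy Go sketch of the detection logic described in the commit message: if two versions of a file share their metadata but differ in content, the diff output gets a dedicated modifier. The node type, its field set and the "?" marker are illustrative assumptions only, not restic's actual data structures or output format:

```
package main

import "fmt"

// node is a hypothetical, stripped-down snapshot node used only for this
// illustration; restic's real tree nodes carry far more metadata.
type node struct {
	Name    string
	Size    uint64
	ModTime int64
	Content string // stand-in for the list of content blob IDs
}

// sameMetadata reports whether two nodes are identical apart from content.
func sameMetadata(a, b node) bool {
	return a.Name == b.Name && a.Size == b.Size && a.ModTime == b.ModTime
}

// diffMarker returns the modifier for a file present in both snapshots: "M"
// for an ordinary modification, and a dedicated marker when only the content
// changed, which points at bitrot in the source file.
func diffMarker(old, new node) string {
	switch {
	case old.Content == new.Content:
		return " " // unchanged
	case sameMetadata(old, new):
		return "?" // content changed although metadata did not: likely bitrot
	default:
		return "M"
	}
}

func main() {
	a := node{Name: "photo.jpg", Size: 4096, ModTime: 1700000000, Content: "abc"}
	b := a
	b.Content = "abd" // flipped bit, metadata untouched
	fmt.Println(diffMarker(a, b))
}
```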
Michael Eischer
51419c51d3 archiver: Add filepath to error message if it is not included yet 2024-01-06 19:08:24 +01:00
Michael Eischer
6b79834cc8 archiver: improve error message for irregular files
Since Go 1.21, most reparse points are considered irregular files.
Depending on the underlying driver these can exhibit nearly arbitrary
behavior. When encountering such a file, restic returned an
indecipherable error message: `error: invalid node type ""`.

Add the filepath to the error message and state that the file type is
not supported.
2024-01-06 19:03:11 +01:00
Michael Eischer
0018bb7854 restic: cleanup node type determination
os.ModeCharDevice is already included in os.ModeType
2024-01-06 18:43:16 +01:00
Michael Eischer
634e2a46d9 Merge pull request #4608 from restic/dependabot/go_modules/github.com/minio/minio-go/v7-7.0.66
build(deps): bump github.com/minio/minio-go/v7 from 7.0.63 to 7.0.66
2024-01-06 11:40:02 +00:00
Michael Eischer
dfcab92db2 Merge pull request #4609 from restic/dependabot/go_modules/github.com/Azure/azure-sdk-for-go/sdk/azcore-1.9.1
build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.8.0 to 1.9.1
2024-01-06 11:06:07 +00:00
dependabot[bot]
3666eef76c build(deps): bump github.com/minio/minio-go/v7 from 7.0.63 to 7.0.66
Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.63 to 7.0.66.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.63...v7.0.66)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-06 11:03:52 +00:00
Michael Eischer
3a61622dfe Merge pull request #4610 from restic/dependabot/github_actions/actions/setup-go-5
build(deps): bump actions/setup-go from 4 to 5
2024-01-06 11:01:07 +00:00
Michael Eischer
5c4fca76df Merge pull request #4607 from restic/dependabot/go_modules/golang.org/x/crypto-0.17.0
build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0
2024-01-06 10:56:40 +00:00
Michael Eischer
98da0bdd12 Merge pull request #4606 from restic/dependabot/go_modules/github.com/klauspost/compress-1.17.4
build(deps): bump github.com/klauspost/compress from 1.17.2 to 1.17.4
2024-01-06 10:55:00 +00:00
dependabot[bot]
2c60dd97ae build(deps): bump actions/setup-go from 4 to 5
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-01 01:33:20 +00:00
dependabot[bot]
40905403f4 build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore
Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.8.0 to 1.9.1.
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.8.0...sdk/azcore/v1.9.1)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-01 01:18:36 +00:00
dependabot[bot]
7e7cbe8e19 build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-01 01:18:08 +00:00
dependabot[bot]
44646c20be build(deps): bump github.com/klauspost/compress from 1.17.2 to 1.17.4
Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.2 to 1.17.4.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.17.2...v1.17.4)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-01 01:17:57 +00:00
Michael Eischer
8f9a35779e Merge pull request #4600 from MichaelEischer/doc-forget-note-1
Add a note that the oldest snapshot may be kept additionally
2023-12-29 17:56:54 +01:00
Michael Eischer
23e1b4bbb1 Merge pull request #4573 from gab50000/rewrite_time
Rewrite metadata
2023-12-24 14:42:00 +00:00
Michael Eischer
01b33734ab rewrite: update command output in docs 2023-12-24 15:36:22 +01:00
Michael Eischer
649a6409ee rewrite: cleanup tests 2023-12-24 15:36:22 +01:00
Michael Eischer
c31f5f986c rewrite: Minor cleanups 2023-12-24 15:36:22 +01:00
Michael Eischer
2730d05fce rewrite: Don't walk snapshot content if only metadata is modified 2023-12-24 15:36:22 +01:00
Michael Eischer
893d0d6325 rewrite: cleanup new metadata options and fix no parameters check 2023-12-24 15:36:22 +01:00
Gabriel Kabbe
7de97d7480 rewrite: Add documentation 2023-12-24 15:36:22 +01:00
Gabriel Kabbe
004520a238 rewrite: Add changelog 2023-12-24 15:36:22 +01:00
Gabriel Kabbe
a02d8d75c2 rewrite: Implement rewriting metadata 2023-12-24 15:36:22 +01:00
Gabriel Kabbe
7bf38b6c50 rewrite: Add test TestRewriteMetadata 2023-12-24 15:36:22 +01:00
Gabriel Kabbe
da1704b2d5 rewrite: Add tests
Pass nil instead of metadata to existing tests
2023-12-24 15:36:19 +01:00
Gabriel Kabbe
3026baea07 rewrite: Add structs for tracking metadata changes
Adds

  * snapshotMetadataArgs, which holds the new metadata as strings parsed from
    the command line

  * snapshotMetadata, which holds the new metadata converted to the
    correct types
2023-12-24 14:43:07 +01:00
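A rough Go sketch of what such a pair of structs could look like; the field names, time layout and conversion logic are assumptions for illustration, not restic's actual definitions:

```
package main

import (
	"fmt"
	"time"
)

// snapshotMetadataArgs holds the new metadata exactly as given on the command
// line (field names here are illustrative assumptions).
type snapshotMetadataArgs struct {
	Hostname string
	Time     string
}

// snapshotMetadata holds the same values converted to their proper types.
type snapshotMetadata struct {
	Hostname string
	Time     *time.Time
}

// convert parses the string arguments; an empty string means "leave as is".
func (a snapshotMetadataArgs) convert() (snapshotMetadata, error) {
	m := snapshotMetadata{Hostname: a.Hostname}
	if a.Time != "" {
		t, err := time.ParseInLocation("2006-01-02 15:04:05", a.Time, time.Local)
		if err != nil {
			return snapshotMetadata{}, fmt.Errorf("invalid time %q: %w", a.Time, err)
		}
		m.Time = &t
	}
	return m, nil
}

func main() {
	args := snapshotMetadataArgs{Hostname: "newhost", Time: "2023-12-24 15:36:22"}
	meta, err := args.convert()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v %v\n", meta.Hostname, meta.Time)
}
```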
Michael Eischer
1196c72819 Merge pull request #4570 from MarkusZoppelt/docs/pkgx
docs: add pkgx install option
2023-12-24 11:04:59 +00:00
Michael Eischer
433dd92959 Merge pull request #4590 from renard/optimize-mount-failure
mount: detect mountpoint does not exist before opening the repository
2023-12-24 10:59:56 +00:00
Markus Zoppelt
c14740c50f docs: add pkgx install option
PR in pkgx pantry: https://github.com/pkgxdev/pantry/pull/4098

restic pkg:
https://pkgx.dev/pkgs/restic.net/restic/
2023-12-24 11:59:12 +01:00
Michael Eischer
5537460664 Merge pull request #4598 from MichaelEischer/add_table_of_contents
Add table of contents
2023-12-24 11:56:05 +01:00
Sébastien Gross
f7587be28f mount: detect mountpoint does not exist before opening the repository
Bug #1681 suggests that restic should not be nice to the user and should
refrain from creating a mountpoint if it does not exist. Nevertheless,
it currently opens the repository before checking for the mountpoint's
existence. In the case of large or remote repositories, this process
can be time-consuming, delaying the inevitable outcome.

    /restic mount --repo=REMOTE --verbose /tmp/backup
    repository 33f14e42 opened (version 2, compression level max)
    [0:38] 100.00%  162 / 162 index files loaded
    Mountpoint /tmp/backup doesn't exist
    stat /tmp/backup: no such file or directory

    real	0m39.534s
    user	1m53.961s
    sys	0m3.044s

In this scenario, 40 seconds could have been saved if the nonexistence
of the path had been verified beforehand.

This patch relocates the mountpoint check to the beginning of the
runMount function, preceding the opening of the repository.

    /restic mount --repo=REMOTE --verbose /tmp/backup
    Mountpoint /tmp/backup doesn't exist
    stat /tmp/backup: no such file or directory

    real	0m0.136s
    user	0m0.018s
    sys	0m0.027s

Signed-off-by: Sébastien Gross <seb•ɑƬ•chezwam•ɖɵʈ•org>
2023-12-24 11:54:18 +01:00
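The relocation boils down to a cheap stat of the mountpoint before the repository is opened. A minimal Go sketch of that early check; the function name is hypothetical:

```
package main

import (
	"fmt"
	"os"
)

// checkMountpoint verifies that the mountpoint exists before any expensive
// work (such as opening the repository and loading indexes) is started.
func checkMountpoint(dir string) error {
	if _, err := os.Stat(dir); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("mountpoint %s doesn't exist: %w", dir, err)
		}
		return err
	}
	return nil
}

func main() {
	if err := checkMountpoint("/tmp/backup"); err != nil {
		fmt.Println(err)
		return // fail fast, before the repository is opened
	}
	fmt.Println("mountpoint exists, opening repository...")
}
```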
Michael Eischer
91fb703756 regenerate changelog 2023-12-24 11:47:31 +01:00
Michael Eischer
d7ff862b8d cleanup changelog whitespace 2023-12-24 11:47:31 +01:00
mmattel
db1d920c80 Add a table of contents (TOC) to the changelog template 2023-12-24 11:47:31 +01:00
Michael Eischer
c6299f8dbd Merge pull request #4582 from giuseppedandrea/docs/fix-typo
docs: fix typo in working with repos
2023-12-24 00:09:59 +00:00
Giuseppe D'Andrea
a128976014 docs: fix typo in working with repos
When using the `copy` command, `--from-password-file` and `--from-password-command` flags are used to specify the password of the source repository, not of the destination repository.
2023-12-24 01:04:36 +01:00
Michael Eischer
e2f6109a52 Merge pull request #4580 from restic/dependabot/go_modules/golang.org/x/oauth2-0.15.0
build(deps): bump golang.org/x/oauth2 from 0.13.0 to 0.15.0
2023-12-24 00:02:30 +00:00
Michael Eischer
30e6ed038c Merge pull request #4586 from Gelma/typos
Fix typos
2023-12-23 13:21:50 +00:00
Michael Eischer
e96d1ee33e Merge pull request #4593 from SimJoSt/patch-1
docs(scripting): correct stats output comment to be about the correct command
2023-12-23 13:09:20 +00:00
dependabot[bot]
0054db394f build(deps): bump golang.org/x/oauth2 from 0.13.0 to 0.15.0
Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.13.0 to 0.15.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.13.0...v0.15.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-23 13:08:29 +00:00
Michael Eischer
53ebe91a50 Move changelog typo fixes to changelog files 2023-12-23 14:05:47 +01:00
Michael Eischer
9ceaea34dd Merge pull request #4572 from linktohack/patch-1
docs: Mention progress for restore command. This is available after https://github.com/restic/restic/pull/3991
2023-12-23 13:00:32 +00:00
Michael Eischer
356b7aac16 Merge pull request #4571 from michaelkuhn/init-repo-file
Fix repository not being printed when using repository file
2023-12-23 12:59:06 +00:00
Joda Stößer
eef7c65655 docs(scripting): correct stats output comment to be about the correct command
not about the snapshots command
2023-12-23 13:56:40 +01:00
Michael Eischer
97b8629336 Merge pull request #4579 from restic/dependabot/go_modules/golang.org/x/sync-0.5.0
build(deps): bump golang.org/x/sync from 0.4.0 to 0.5.0
2023-12-23 12:51:52 +00:00
Michael Eischer
d2ecd6bef2 Merge pull request #4577 from restic/dependabot/go_modules/golang.org/x/time-0.5.0
build(deps): bump golang.org/x/time from 0.3.0 to 0.5.0
2023-12-23 12:50:59 +00:00
Michael Eischer
634750a732 Merge pull request #4576 from restic/dependabot/github_actions/docker/login-action-3d58c274f17dffee475a5520cbe67f0a882c4dbb
build(deps): bump docker/login-action from 1f401f745bf57e30b3a2800ad308a87d2ebdf14b to 3d58c274f17dffee475a5520cbe67f0a882c4dbb
2023-12-23 12:50:52 +00:00
Quang-Linh LE
c554825e2d docs: Mention progress for restore command. This is available after https://github.com/restic/restic/pull/3991 2023-12-23 13:50:20 +01:00
Michael Kuhn
fd2fb233aa Fix repository not being printed when using repository file
When using `RESTIC_REPOSITORY_FILE` in combination with `restic init`,
the repository is missing in the output:
```
$ restic init
created restic repository 3c872be20f at
[...]
```
This is due to the code using `gopts.Repo`, which is empty in this case.
2023-12-23 13:49:22 +01:00
Michael Eischer
da4e3edbbc Merge pull request #4596 from MichaelEischer/update-golangcilint
CI: update golangci-lint
2023-12-23 13:48:33 +01:00
Michael Eischer
dbbd31bc3a CI: update golangci-lint
Necessary to properly support Go 1.21.
2023-12-23 13:41:30 +01:00
Joram Berger
12af20e606 Add a note that the oldest snapshot may be kept additionally
Documentation enhancement.
2023-12-18 18:24:57 +01:00
Andrea Gelmini
241916d55b Fix typos 2023-12-06 13:11:55 +01:00
dependabot[bot]
427b90cf82 build(deps): bump golang.org/x/sync from 0.4.0 to 0.5.0
Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.4.0 to 0.5.0.
- [Commits](https://github.com/golang/sync/compare/v0.4.0...v0.5.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-01 01:15:09 +00:00
dependabot[bot]
eec6e014f4 build(deps): bump golang.org/x/time from 0.3.0 to 0.5.0
Bumps [golang.org/x/time](https://github.com/golang/time) from 0.3.0 to 0.5.0.
- [Commits](https://github.com/golang/time/compare/v0.3.0...v0.5.0)

---
updated-dependencies:
- dependency-name: golang.org/x/time
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-01 01:14:53 +00:00
dependabot[bot]
fa46a47e22 build(deps): bump docker/login-action
Bumps [docker/login-action](https://github.com/docker/login-action) from 1f401f745bf57e30b3a2800ad308a87d2ebdf14b to 3d58c274f17dffee475a5520cbe67f0a882c4dbb.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](1f401f745b...3d58c274f1)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-01 01:02:16 +00:00
Michael Eischer
b72de5a883 Merge branch 'patch-release' 2023-11-12 11:52:21 +01:00
Michael Eischer
6086ae4ca7 Merge pull request #4563 from smlx/mac-signature
fix: drop reference to signature and define MAC
2023-11-08 21:59:13 +00:00
Scott Leggett
aeaf527be1 fix: drop reference to signature and define MAC
Poly1305-AES is not a signature, so don't mention that.

In addition, the term MAC was used without being defined, so add a
definition.

Signed-off-by: Scott Leggett <scott@sl.id.au>
2023-11-06 20:12:42 +08:00
Michael Eischer
19068aa82f Merge pull request #4559 from DRON-666/html-zip
Restore generation of `HTMLZip` files on `readthedocs.com`.
2023-11-02 20:53:12 +00:00
Michael Eischer
81ca9d28f2 Merge pull request #4553 from CommanderRoot/add-version-json
Add --json option to version command
2023-11-01 21:58:38 +00:00
Tobias Speicher
ce53ea32c6 Split go_target into go_os and go_arch 2023-11-01 22:43:38 +01:00
Tobias Speicher
10cbc169c1 Use different function to be more consistent with other code 2023-11-01 22:18:37 +01:00
DRON-666
03f8f494e9 doc: add HTMLZip format to .readthedocs.yaml 2023-11-02 00:16:47 +03:00
Tobias Speicher
ab23d033b6 Add version command output to JSON format documentation 2023-11-01 22:13:57 +01:00
Tobias Speicher
6f1efcb28b Update wording on changelog entry 2023-11-01 22:12:19 +01:00
Michael Eischer
9c399e55e3 Merge pull request #4554 from restic/dependabot/github_actions/docker/login-action-1f401f745bf57e30b3a2800ad308a87d2ebdf14b
build(deps): bump docker/login-action from b4bedf8053341df3b5a9f9e0f2cf4e79e27360c6 to 1f401f745bf57e30b3a2800ad308a87d2ebdf14b
2023-11-01 20:42:11 +00:00
Michael Eischer
e550bc0713 Merge pull request #4555 from restic/dependabot/go_modules/google.golang.org/api-0.149.0
build(deps): bump google.golang.org/api from 0.148.0 to 0.149.0
2023-11-01 20:40:45 +00:00
Michael Eischer
28aa9826af Merge pull request #4557 from restic/dependabot/go_modules/cloud.google.com/go/storage-1.34.0
build(deps): bump cloud.google.com/go/storage from 1.33.0 to 1.34.0
2023-11-01 20:40:40 +00:00
dependabot[bot]
6dde019ac8 build(deps): bump cloud.google.com/go/storage from 1.33.0 to 1.34.0
Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.33.0 to 1.34.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.33.0...spanner/v1.34.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-11-01 01:45:02 +00:00
dependabot[bot]
c2f9e21d3c build(deps): bump google.golang.org/api from 0.148.0 to 0.149.0
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.148.0 to 0.149.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.148.0...v0.149.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-11-01 01:44:28 +00:00
dependabot[bot]
67e6b9104a build(deps): bump docker/login-action
Bumps [docker/login-action](https://github.com/docker/login-action) from b4bedf8053341df3b5a9f9e0f2cf4e79e27360c6 to 1f401f745bf57e30b3a2800ad308a87d2ebdf14b.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](b4bedf8053...1f401f745b)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-11-01 01:25:22 +00:00
Tobias Speicher
6ca07ee004 add changelog/unreleases for issue-4547 2023-10-31 19:39:52 +01:00
Tobias Speicher
d45cc52468 command version: add json output option 2023-10-31 19:31:07 +01:00
Alexander Neumann
59fe24cb2b Set development version for 0.16.2 2023-10-29 19:50:12 +01:00
Alexander Neumann
1a5efcf680 Add version for 0.16.2 2023-10-29 19:50:07 +01:00
Alexander Neumann
d33fe6dd3c Generate CHANGELOG.md for 0.16.2 2023-10-29 19:50:04 +01:00
Alexander Neumann
c8dd95f104 Prepare changelog for 0.16.2 2023-10-29 19:50:04 +01:00
Leo R. Lundgren
7d980b469d doc: Polish changelogs 2023-10-29 00:44:53 +02:00
Michael Eischer
d863234e3e add changelog for missing documentation 2023-10-28 20:25:24 +02:00
Michael Eischer
42ab3ea2b9 Merge pull request #4410 from Enrico204/restic-stdin-command
add --stdin-from-command flag to backup command
2023-10-27 22:09:06 +00:00
Michael Eischer
be28a02626 doc: tweak description for --stdin-from-command 2023-10-27 23:58:52 +02:00
Michael Eischer
5d152c7720 extend changelog for --stdin-from-command 2023-10-27 23:58:52 +02:00
Michael Eischer
ee305e6041 backup: rework error reporting for subcommand 2023-10-27 23:58:52 +02:00
Michael Eischer
8bceb8e359 fs: add tests for CommandReader 2023-10-27 23:58:52 +02:00
Michael Eischer
317144c1d6 fs: merge command startup into CommandReader 2023-10-27 23:58:51 +02:00
Michael Eischer
7d879705ad fs: cleanup CommandReader implementation 2023-10-27 23:58:51 +02:00
Enrico204
37a312e505 restic-from-command: use standard behavior when no output and exit code 0 from command
The behavior of the new option should reflect the behavior of normal backups: when the command exit code is zero and there is no output on stdout, emit a warning but still create the snapshot. This commit fixes the integration tests and the ReadCloserCommand struct.
2023-10-27 23:58:51 +02:00
Enrico204
c0ca54dc8a restic-from-command: add tests 2023-10-27 23:58:51 +02:00
Enrico204
81f8d473df restic-from-command: abort snapshot on non-zero exit codes 2023-10-27 23:58:51 +02:00
Enrico204
6990b0122e Add issue-4251 (stdin-from-command) in the changelog 2023-10-27 23:58:51 +02:00
Enrico204
072b227544 stdin-from-command: add documentation in backup sub-command 2023-10-27 23:58:51 +02:00
Enrico204
4e5caab114 stdin-from-command: implemented suggestions in #4254
The code has been refactored so that the archiver is back to the original code, and stderr is handled using a goroutine to avoid deadlock.
2023-10-27 23:58:51 +02:00
Sebastian Hoß
c133065a9f Check command result before snapshotting
Return with an error containing the stderr of the given command in case it fails. No new snapshot will be created and future prune operations on the repository will remove the unreferenced data.

Signed-off-by: Sebastian Hoß <seb@xn--ho-hia.de>
2023-10-27 23:58:51 +02:00
Sebastian Hoß
25350a9c55 Extend SnapshotOptions w/ command data
In order to determine whether to save a snapshot, we need to capture the exit code returned by a command. In order to provide a nice error message, we supply stderr as well.

Signed-off-by: Sebastian Hoß <seb@xn--ho-hia.de>
2023-10-27 23:58:51 +02:00
Sebastian Hoß
a2b76ff34f Start command from --stdin-from-command
It acts similar to --stdin but reads its data from the stdout of the given command instead of os.Stdin.

Signed-off-by: Sebastian Hoß <seb@xn--ho-hia.de>
2023-10-27 23:58:51 +02:00
Sebastian Hoß
333fe1c3cf Align Stdin and StdinCommand in conditionals
In order to run with --stdin-from-command we need to short-circuit some functions similar to how it is handled for the --stdin flag. The only difference here is that --stdin-from-command expects len(args) to be greater than 0, whereas --stdin does not expect any args at all.

Signed-off-by: Sebastian Hoß <seb@xn--ho-hia.de>
2023-10-27 23:58:51 +02:00
Sebastian Hoß
a8657bde68 Add --stdin-from-command option
This new flag is added to the backup subcommand in order to allow restic to control the execution of a command and determine whether to save a snapshot if the given command succeeds.

Signed-off-by: Sebastian Hoß <seb@xn--ho-hia.de>
2023-10-27 23:58:51 +02:00
Michael Eischer
104107886a Merge pull request #4503 from MichaelEischer/fix-stats-with-hardlinks
stats: Fix hardlink tracking across multiple filesystems
2023-10-27 23:53:03 +02:00
Michael Eischer
731b3a4357 stats: fix hardlink tracking in a snapshot
inodes are only unique within a device. Use the HardlinkIndex from the
restorer instead of the custom (broken) hashmap to correctly account for
both inode and deviceID.
2023-10-27 23:40:42 +02:00
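A small sketch of the keying described above, using hypothetical type names rather than restic's actual HardlinkIndex: because inode numbers repeat across filesystems, the index key has to combine the device ID with the inode.

```go
package main

import "fmt"

// hardlinkKey identifies a file across hardlinks. Inode numbers are only
// unique per device, so the device ID must be part of the key.
type hardlinkKey struct {
	deviceID uint64
	inode    uint64
}

// hardlinkIndex remembers each (device, inode) pair it has seen so that
// additional links to the same file are not counted twice.
type hardlinkIndex struct {
	seen map[hardlinkKey]uint64
}

func newHardlinkIndex() *hardlinkIndex {
	return &hardlinkIndex{seen: make(map[hardlinkKey]uint64)}
}

// add returns true if this (device, inode) pair is seen for the first time.
func (idx *hardlinkIndex) add(device, inode, size uint64) bool {
	k := hardlinkKey{deviceID: device, inode: inode}
	if _, ok := idx.seen[k]; ok {
		return false
	}
	idx.seen[k] = size
	return true
}

func main() {
	idx := newHardlinkIndex()
	fmt.Println(idx.add(1, 42, 1024)) // true: first link seen
	fmt.Println(idx.add(1, 42, 1024)) // false: hardlink to the same file
	fmt.Println(idx.add(2, 42, 1024)) // true: same inode, different device
}
```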
Michael Eischer
a8fdcf79b7 restorer: Make hardlink index generic
This will allow reusing it for the stats command without regressing the
memory usage due to storing an unnecessary file path.
2023-10-27 23:40:42 +02:00
Michael Eischer
45962c2847 Merge pull request #4499 from MichaelEischer/modular-backend-code
Split backend code from restic package
2023-10-27 20:19:20 +02:00
Michael Eischer
4be45de1c2 Restore support for ARMv5 platforms 2023-10-27 19:52:14 +02:00
Leo R. Lundgren
8c1125fe13 doc: Correct two typos 2023-10-27 19:48:00 +02:00
Martin Michlmayr
0b6ccea461 Fix typos 2023-10-27 19:48:00 +02:00
Martin Michlmayr
de6135351e Format option correctly 2023-10-27 19:48:00 +02:00
Michael Eischer
d47581b25e verify-release-binaries.sh: don't show warning if binaries are correct 2023-10-27 19:48:00 +02:00
Michael Eischer
69dec02a14 Remove readthedocs special case from docs configuration
Apparently it's now required to bring your own theme.
2023-10-27 19:47:19 +02:00
Michael Eischer
826d880614 Fix doc path typo in readthedocs configuration 2023-10-27 19:47:15 +02:00
Michael Eischer
dbf7ef72b9 Add read the docs config file version 2
The config file is by now necessary to build documentation:
https://blog.readthedocs.com/migrate-configuration-v2/
2023-10-27 19:47:11 +02:00
Michael Eischer
50ef01131a Merge pull request #4542 from MichaelEischer/support-armv6
Only support ARMv6 on ARM platforms
2023-10-27 19:42:49 +02:00
rawtaz
6be3a8fe51 Merge pull request #4539 from tbm/docs
Fix typos in docs
2023-10-27 17:30:08 +00:00
Michael Eischer
5166bde386 Only support ARMv6 on ARM platforms
Go 1.21 has switched the default from GOARM=5 to GOARM=7. So far there
have been complaints from Raspberry Pi 1 users, as the first raspberry
pi version only supports ARMv6. Exclude older ARM versions as these are
likely not relevant (rest-server also only supports ARMv6/7) and enforce
the use of software floating point emulation.
2023-10-27 19:12:12 +02:00
Leo R. Lundgren
aafb806a8c doc: Correct two typos 2023-10-27 18:56:32 +02:00
Martin Michlmayr
41e6a02bcc Fix typos 2023-10-27 18:56:32 +02:00
Martin Michlmayr
b51fe2fb69 Format option correctly 2023-10-27 18:56:32 +02:00
Michael Eischer
56537fb48e Merge pull request #4545 from restic/fix-rtd3
Try to fix documentation build
2023-10-26 22:00:20 +02:00
Michael Eischer
feea567868 Remove readthedocs special case from docs configuration
Apparently it's now required to bring your own theme.
2023-10-26 21:56:36 +02:00
Michael Eischer
2968b52f84 Merge pull request #4543 from MichaelEischer/fix-rtd2
Fix doc path typo in readthedocs configuration
2023-10-26 20:40:45 +02:00
Michael Eischer
619e80d7cc Fix doc path typo in readthedocs configuration 2023-10-26 20:39:43 +02:00
Michael Eischer
3804e50d64 Merge pull request #4541 from MichaelEischer/fix-binary-check-script
verify-release-binaries.sh: don't show warning if binaries are correct
2023-10-26 20:37:56 +02:00
Michael Eischer
c19e39968f verify-release-binaries.sh: don't show warning if binaries are correct 2023-10-26 19:59:27 +02:00
Michael Eischer
550be5c1e9 Merge pull request #4538 from MichaelEischer/fix-rtd
Add read the docs config file version 2
2023-10-26 19:42:36 +02:00
Michael Eischer
249605843b prune: get backend connection count via repository 2023-10-25 23:01:54 +02:00
Michael Eischer
c7b770eb1f convert MemorizeList to be repository based
Ideally, code that uses a repository shouldn't directly interact with
the underlying backend. Thus, move MemorizeList one layer up.
2023-10-25 23:01:35 +02:00
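A hedged sketch of the memoization idea with made-up interface and type names (not restic's API): one full listing is cached and later List calls are replayed from memory, so callers never have to reach through to the backend themselves.

```go
package main

import "fmt"

// lister is a stand-in for the listing part of a repository or backend.
type lister interface {
	List(fileType string, fn func(id string) error) error
}

// memorizedLister caches the IDs returned by the first List call per file
// type and replays them for all later calls.
type memorizedLister struct {
	base  lister
	cache map[string][]string
}

func memorizeList(base lister) *memorizedLister {
	return &memorizedLister{base: base, cache: make(map[string][]string)}
}

func (m *memorizedLister) List(fileType string, fn func(id string) error) error {
	ids, ok := m.cache[fileType]
	if !ok {
		if err := m.base.List(fileType, func(id string) error {
			ids = append(ids, id)
			return nil
		}); err != nil {
			return err
		}
		m.cache[fileType] = ids
	}
	for _, id := range ids {
		if err := fn(id); err != nil {
			return err
		}
	}
	return nil
}

// sliceLister is a trivial in-memory "backend" used only for this example.
type sliceLister []string

func (s sliceLister) List(_ string, fn func(id string) error) error {
	for _, id := range s {
		if err := fn(id); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	m := memorizeList(sliceLister{"a1b2", "c3d4"})
	for i := 0; i < 2; i++ { // the second pass is answered from the cache
		_ = m.List("snapshot", func(id string) error {
			fmt.Println(i, id)
			return nil
		})
	}
}
```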
Michael Eischer
1b8a67fe76 move Backend interface to backend package 2023-10-25 23:00:18 +02:00
Michael Eischer
ceb0774af1 backend: make LoadAll independent of restic package 2023-10-25 22:58:39 +02:00
Michael Eischer
b6d79bdf6f restic: decouple restic.Handle 2023-10-25 22:54:07 +02:00
Michael Eischer
7881309d63 backend: move backend implementation helpers to util package
This removes code that is only used within a backend implementation from
the backend package. The latter now only contains code that also has
external users.
2023-10-25 22:54:07 +02:00
Michael Eischer
8e6fdf5edf Merge pull request #4520 from awannabeengineer/load-retry-nonexistent
retry: Do not retry Load() if file does not exist
2023-10-25 20:42:05 +00:00
Michael Eischer
c2e3e8d6ea Add read the docs config file version 2
The config file is by now necessary to build documentation:
https://blog.readthedocs.com/migrate-configuration-v2/
2023-10-25 22:00:42 +02:00
Alexander Neumann
27ec320eae Set development version for 0.16.1 2023-10-24 20:02:58 +02:00
Arash Farr
d15ffd9c92 retry: Do not retry Load() if file does not exist 2023-10-22 13:25:32 -05:00
318 changed files with 8708 additions and 4965 deletions

View File

@@ -25,7 +25,7 @@ jobs:
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@b4bedf8053341df3b5a9f9e0f2cf4e79e27360c6
uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}

View File

@@ -13,7 +13,7 @@ permissions:
contents: read
env:
latest_go: "1.21.x"
latest_go: "1.22.x"
GO111MODULE: on
jobs:
@@ -23,27 +23,32 @@ jobs:
# list of jobs to run:
include:
- job_name: Windows
go: 1.21.x
go: 1.22.x
os: windows-latest
- job_name: macOS
go: 1.21.x
go: 1.22.x
os: macOS-latest
test_fuse: false
- job_name: Linux
go: 1.21.x
go: 1.22.x
os: ubuntu-latest
test_cloud_backends: true
test_fuse: true
check_changelog: true
- job_name: Linux (race)
go: 1.21.x
go: 1.22.x
os: ubuntu-latest
test_fuse: true
test_opts: "-race"
- job_name: Linux
go: 1.21.x
os: ubuntu-latest
test_fuse: true
- job_name: Linux
go: 1.20.x
os: ubuntu-latest
@@ -62,7 +67,7 @@ jobs:
steps:
- name: Set up Go ${{ matrix.go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
@@ -226,7 +231,7 @@ jobs:
steps:
- name: Set up Go ${{ env.latest_go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.latest_go }}
@@ -244,7 +249,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go ${{ env.latest_go }}
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ env.latest_go }}
@@ -255,7 +260,7 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.52.2
version: v1.56.1
args: --verbose --timeout 5m
# only run golangci-lint for pull requests, otherwise ALL hints get

.gitignore vendored
View File

@@ -1,3 +1,4 @@
/.idea
/restic
/restic.exe
/.vagrant

View File

@@ -35,6 +35,9 @@ linters:
# parse and typecheck code
- typecheck
# ensure that http response bodies are closed
- bodyclose
issues:
# don't use the default exclude rules, this hides (among others) ignored
# errors from Close() calls
@@ -51,3 +54,8 @@ issues:
# staticcheck: there's no easy way to replace these packages
- "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated"
- "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated"
exclude-rules:
# revive: ignore unused parameters in tests
- path: (_test\.go|testing\.go|backend/.*/tests\.go)
text: "unused-parameter:"

.readthedocs.yaml Normal file
View File

@@ -0,0 +1,22 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
# Build HTMLZip
formats:
- htmlzip
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: doc/conf.py
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: doc/requirements.txt

File diff suppressed because it is too large

View File

@@ -6,7 +6,8 @@ Ways to Help Out
Thank you for your contribution! Please **open an issue first** (or add a
comment to an existing issue) if you plan to work on any code or add a new
feature. This way, duplicate work is prevented and we can discuss your ideas
and design first.
and design first. Small bugfixes are an exception to this rule, just open a
pull request in this case.
There are several ways you can help us out. First of all code contributions and
bug fixes are most welcome. However even "minor" details as fixing spelling
@@ -61,7 +62,7 @@ uploading it somewhere or post only the parts that are really relevant.
If restic gets stuck, please also include a stacktrace in the description.
On non-Windows systems, you can send a SIGQUIT signal to restic or press
`Ctrl-\` to achieve the same result. This causes restic to print a stacktrace
and then exit immediatelly. This will not damage your repository, however,
and then exit immediately. This will not damage your repository, however,
it might be necessary to manually clean up stale lock files using
`restic unlock`.

View File

@@ -1 +1 @@
0.16.1
0.16.4

View File

@@ -1,6 +1,6 @@
Bugfix: Don't abort the stats command when data blobs are missing
Runing the stats command in the blobs-per-file mode on a repository with
Running the stats command in the blobs-per-file mode on a repository with
missing data blobs previously resulted in a crash.
https://github.com/restic/restic/pull/2668

View File

@@ -1,6 +1,6 @@
Bugfix: Mark repository files as read-only when using the local backend
Files stored in a local repository were marked as writeable on the
Files stored in a local repository were marked as writable on the
filesystem for non-Windows systems, which did not prevent accidental file
modifications outside of restic. In addition, the local backend did not work
with certain filesystems and network mounts which do not permit modifications

View File

@@ -5,7 +5,7 @@ another process using an exclusive lock through a filesystem snapshot. Restic
was unable to backup those files before. This update enables backing up these
files.
This needs to be enabled explicitely using the --use-fs-snapshot option of the
This needs to be enabled explicitly using the --use-fs-snapshot option of the
backup command.
https://github.com/restic/restic/issues/340

View File

@@ -2,7 +2,7 @@ Enhancement: Parallelize scan of snapshot content in `copy` and `prune`
The `copy` and `prune` commands used to traverse the directories of
snapshots one by one to find used data. This snapshot traversal is
now parallized which can speed up this step several times.
now parallelized which can speed up this step several times.
In addition the `check` command now reports how many snapshots have
already been processed.

View File

@@ -3,7 +3,7 @@ Enhancement: Allow limiting IO concurrency for local and SFTP backend
Restic did not support limiting the IO concurrency / number of connections for
accessing repositories stored using the local or SFTP backends. The number of
connections is now limited as for other backends, and can be configured via the
the `-o local.connections=2` and `-o sftp.connections=5` options. This ensures
that restic does not overwhelm the backend with concurrent IO operations.
`-o local.connections=2` and `-o sftp.connections=5` options. This ensures that
restic does not overwhelm the backend with concurrent IO operations.
https://github.com/restic/restic/pull/3475

View File

@@ -0,0 +1,9 @@
Bugfix: Restore ARMv5 support for ARM binaries
The official release binaries for restic 0.16.1 were accidentally built to
require ARMv7. The build process is now updated to restore support for ARMv5.
Please note that restic 0.17.0 will drop support for ARMv5 and require at least
ARMv6.
https://github.com/restic/restic/issues/4540

View File

@@ -0,0 +1,8 @@
Bugfix: Repair documentation build on Read the Docs
For restic 0.16.1, no documentation was available at
https://restic.readthedocs.io/ .
The documentation build process is now updated to work again.
https://github.com/restic/restic/pull/4545

View File

@@ -0,0 +1,14 @@
Bugfix: Improve errors for irregular files on Windows
Since Go 1.21, most filesystem reparse points on Windows are considered to be
irregular files. This caused restic to show an `error: invalid node type ""`
error message for those files.
This error message has now been improved and includes the relevant file path:
`error: nodeFromFileInfo path/to/file: unsupported file type "irregular"`.
As irregular files are not required to behave like regular files, it is not
possible to provide a generic way to back up those files.
https://github.com/restic/restic/issues/4560
https://github.com/restic/restic/pull/4620
https://forum.restic.net/t/windows-backup-error-invalid-node-type/6875

View File

@@ -0,0 +1,11 @@
Bugfix: Support backup of deduplicated files on Windows again
With the official release builds of restic 0.16.1 and 0.16.2, it was not
possible to back up files that were deduplicated by the corresponding
Windows Server feature. This also applied to restic versions built using
Go 1.21.0-1.21.4.
The Go version used to build restic has now been updated to fix this.
https://github.com/restic/restic/issues/4574
https://github.com/restic/restic/pull/4621

View File

@@ -0,0 +1,11 @@
Bugfix: Improve error handling for `rclone` backend
Since restic 0.16.0, if rclone encountered an error while listing files,
this could in rare circumstances cause restic to assume that there are no
files. Although unlikely, this situation could result in data loss if it
were to happen right when the `prune` command is listing existing snapshots.
Error handling has now been improved to detect and work around this case.
https://github.com/restic/restic/issues/4612
https://github.com/restic/restic/pull/4618

View File

@@ -0,0 +1,11 @@
Bugfix: Correct `restore` progress information if an error occurs
If an error occurred while restoring a snapshot, this could cause the `restore`
progress bar to show incorrect information. In addition, if a data file could
not be loaded completely, then errors would also be reported for some already
restored files.
Error reporting of the `restore` command has now been made more accurate.
https://github.com/restic/restic/pull/4624
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943

View File

@@ -0,0 +1,11 @@
Bugfix: Improve reliability of restoring large files
In some cases restic failed to restore large files that frequently contain the
same file chunk. In combination with certain backends, this could result in
network connection timeouts that caused incomplete restores.
Restic now includes special handling for such file chunks to ensure reliable
restores.
https://github.com/restic/restic/pull/4626
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943

View File

@@ -0,0 +1,18 @@
Enhancement: Add extra verification of data integrity before upload
Hardware issues, or a bug in restic or its dependencies, could previously cause
corruption in the files restic created and stored in the repository. Detecting
such corruption previously required explicitly running the `check --read-data`
or `check --read-data-subset` commands.
To further ensure data integrity, even in the case of hardware issues or
software bugs, restic now performs additional verification of the files about
to be uploaded to the repository.
These extra checks will increase CPU usage during backups. They can therefore,
if absolutely necessary, be disabled using the `--no-extra-verify` global
option. Please note that this should be combined with more active checking
using the previously mentioned check commands.
https://github.com/restic/restic/issues/4529
https://github.com/restic/restic/pull/4681
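As a rough sketch of the kind of pre-upload check described here (the mechanism below is an assumption, not restic's actual verification, which also re-decodes the encrypted data): the encoded blob is re-hashed right before upload and rejected if the hash no longer matches the ID it would be stored under.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyBeforeUpload re-hashes the encoded blob and compares it against the
// ID it is supposed to be stored under. A mismatch indicates corruption
// introduced after the blob was prepared (hardware fault or software bug).
func verifyBeforeUpload(expectedID [32]byte, encoded []byte) error {
	actual := sha256.Sum256(encoded)
	if !bytes.Equal(actual[:], expectedID[:]) {
		return fmt.Errorf("blob corrupted before upload: want %x, got %x", expectedID, actual)
	}
	return nil
}

func main() {
	data := []byte("encoded blob bytes")
	id := sha256.Sum256(data)

	fmt.Println(verifyBeforeUpload(id, data))              // <nil>
	fmt.Println(verifyBeforeUpload(id, []byte("flipped"))) // error
}
```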

View File

@@ -0,0 +1,19 @@
Bugfix: Downgrade zstd library to fix rare data corruption at max. compression
In restic 0.16.3, backups where the compression level was set to `max` (using
`--compression max`) could in rare and very specific circumstances result in
data corruption due to a bug in the library used for compressing data. Restic
0.16.1 and 0.16.2 were not affected.
Restic now uses the previous version of the library used to compress data, the
same version used by restic 0.16.2. Please note that the `auto` compression
level (which restic uses by default) was never affected, and even if you used
`max` compression, chances of being affected by this issue are small.
To check a repository for any corruption, run `restic check --read-data`. This
will download and verify the whole repository and can be used at any time to
completely verify the integrity of a repository. If the `check` command detects
anomalies, follow the suggested steps.
https://github.com/restic/restic/issues/4677
https://github.com/restic/restic/pull/4679

View File

@@ -3,7 +3,7 @@ Enhancement: Add local metadata cache
We've added a local cache for metadata so that restic doesn't need to load
all metadata (snapshots, indexes, ...) from the repo each time it starts. By
default the cache is active, but there's a new global option `--no-cache`
that can be used to disable the cache. By deafult, the cache a standard
that can be used to disable the cache. By default, the cache a standard
cache folder for the OS, which can be overridden with `--cache-dir`. The
cache will automatically populate, indexes and snapshots are saved as they
are loaded. Cache directories for repos that haven't been used recently can

View File

@@ -1,6 +1,6 @@
Enhancement: Make `check` print `no errors found` explicitly
The `check` command now explicetly prints `No errors were found` when no errors
The `check` command now explicitly prints `No errors were found` when no errors
could be found.
https://github.com/restic/restic/pull/1319

View File

@@ -1,4 +1,4 @@
Bugfix: Limit bandwith at the http.RoundTripper for HTTP based backends
Bugfix: Limit bandwidth at the http.RoundTripper for HTTP based backends
https://github.com/restic/restic/issues/1506
https://github.com/restic/restic/pull/1511

View File

@@ -1,7 +1,7 @@
Bugfix: backup: Remove bandwidth display
This commit removes the bandwidth displayed during backup process. It is
misleading and seldomly correct, because it's neither the "read
misleading and seldom correct, because it's neither the "read
bandwidth" (only for the very first backup) nor the "upload bandwidth".
Many users are confused about (and rightly so), c.f. #1581, #1033, #1591

View File

@@ -6,7 +6,7 @@ that means making a request (e.g. via HTTP) and returning an error when the
file already exists.
This is not accurate, the file could have been created between the HTTP request
testing for it, and when writing starts, so we've relaxed this requeriment,
testing for it, and when writing starts, so we've relaxed this requirement,
which saves one additional HTTP request per newly added file.
https://github.com/restic/restic/pull/1623

View File

@@ -1,4 +1,4 @@
Enhancement: Allow keeping a time range of snaphots
Enhancement: Allow keeping a time range of snapshots
We've added the `--keep-within` option to the `forget` command. It instructs
restic to keep all snapshots within the given duration since the newest

View File

@@ -1,7 +1,7 @@
Enhancement: Display reason why forget keeps snapshots
We've added a column to the list of snapshots `forget` keeps which details the
reasons to keep a particuliar snapshot. This makes debugging policies for
reasons to keep a particular snapshot. This makes debugging policies for
forget much easier. Please remember to always try things out with `--dry-run`!
https://github.com/restic/restic/pull/1876

View File

@@ -9,7 +9,7 @@ file should be noticed, and the modified file will be backed up. The ctime check
will be disabled if the --ignore-inode flag was given.
If this change causes problems for you, please open an issue, and we can look in
to adding a seperate flag to disable just the ctime check.
to adding a separate flag to disable just the ctime check.
https://github.com/restic/restic/issues/2179
https://github.com/restic/restic/pull/2212

View File

@@ -1,18 +1,21 @@
{{- range $changes := . }}{{ with $changes -}}
Changelog for restic {{ .Version }} ({{ .Date }})
=======================================
# Table of Contents
{{ range . -}}
* [Changelog for {{ .Version }}](#changelog-for-restic-{{ .Version | replace "." ""}}-{{ .Date | lower -}})
{{ end -}}
{{- range $changes := . }}{{ with $changes }}
# Changelog for restic {{ .Version }} ({{ .Date }})
The following sections list the changes in restic {{ .Version }} relevant to
restic users. The changes are ordered by importance.
Summary
-------
## Summary
{{ range $entry := .Entries }}{{ with $entry }}
* {{ .TypeShort }} #{{ .PrimaryID }}: {{ .Title }}
{{- end }}{{ end }}
Details
-------
## Details
{{ range $entry := .Entries }}{{ with $entry }}
* {{ .Type }} #{{ .PrimaryID }}: {{ .Title }}
{{ range $par := .Paragraphs }}
@@ -27,6 +30,5 @@ Details
{{ range $url := .OtherURLs }}
{{ $url -}}
{{ end }}
{{ end }}{{ end }}
{{ end }}{{ end -}}
{{ end }}{{ end -}}

View File

@@ -0,0 +1,16 @@
Enhancement: Support reading backup from a command's standard output
The `backup` command now supports the `--stdin-from-command` option. When using
this option, the arguments to `backup` are interpreted as a command instead of
paths to back up. `backup` then executes the given command and stores the
standard output from it in the backup, similar to what the `--stdin` option
does. This also enables restic to verify that the command completes with exit
code zero. A non-zero exit code causes the backup to fail.
Note that the `--stdin` option does not have to be specified at the same time,
and that the `--stdin-filename` option also applies to `--stdin-from-command`.
Example: `restic backup --stdin-from-command --stdin-filename dump.sql mysqldump [...]`
https://github.com/restic/restic/issues/4251
https://github.com/restic/restic/pull/4410

View File

@@ -0,0 +1,18 @@
Enhancement: Allow AWS Assume Role to be used for S3 backend
Previously only credentials discovered via the Minio discovery methods
were used to authenticate.
However, there are many circumstances where the discovered credentials have
lower permissions and need to assume a specific role. This is now possible
using the following new environment variables.
- RESTIC_AWS_ASSUME_ROLE_ARN
- RESTIC_AWS_ASSUME_ROLE_SESSION_NAME
- RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID
- RESTIC_AWS_ASSUME_ROLE_REGION (defaults to us-east-1)
- RESTIC_AWS_ASSUME_ROLE_POLICY
- RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT
https://github.com/restic/restic/issues/4472
https://github.com/restic/restic/pull/4474

View File

@@ -0,0 +1,8 @@
Change: Don't retry to load files that don't exist
Restic used to always retry loading files. It now only retries loading
files that actually exist.
https://github.com/restic/restic/issues/4515
https://github.com/restic/restic/issues/1523
https://github.com/restic/restic/pull/4520
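A minimal sketch of that retry guard, assuming a generic load callback and a plain os.ErrNotExist check (restic's real code uses its backend-specific not-found test): transient errors are retried, but a missing file returns immediately.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// loadWithRetry retries load up to maxTries times, but gives up right away
// if the file does not exist, since retrying cannot make it appear.
func loadWithRetry(maxTries int, load func() ([]byte, error)) ([]byte, error) {
	var lastErr error
	for i := 0; i < maxTries; i++ {
		buf, err := load()
		if err == nil {
			return buf, nil
		}
		if errors.Is(err, os.ErrNotExist) {
			return nil, err // permanent: do not retry
		}
		lastErr = err
	}
	return nil, lastErr
}

func main() {
	_, err := loadWithRetry(3, func() ([]byte, error) {
		return os.ReadFile("/definitely/missing/file")
	})
	fmt.Println(err)
}
```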

View File

@@ -0,0 +1,6 @@
Change: Require at least ARMv6 for ARM binaries
The official release binaries of restic now require at least ARMv6 support for ARM platforms.
https://github.com/restic/restic/issues/4540
https://github.com/restic/restic/pull/4542

View File

@@ -0,0 +1,7 @@
Enhancement: Add support for `--json` option to `version` command
Restic now supports outputting the restic version, together with the Go
version, OS and architecture used to build it, as JSON when using the
`version` command.
https://github.com/restic/restic/issues/4547
https://github.com/restic/restic/pull/4553

View File

@@ -0,0 +1,11 @@
Enhancement: Add `--ncdu` option to `ls` command
NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories.
It has an option to save a directory tree and analyse it later.
The `ls` command now supports the `--ncdu` option which outputs information
about a snapshot in the NCDU format.
You can use it as follows: `restic ls latest --ncdu | ncdu -f -`
https://github.com/restic/restic/issues/4549
https://github.com/restic/restic/pull/4550

View File

@@ -0,0 +1,12 @@
Enhancement: Ignore s3.storage-class for metadata if archive tier is specified
There is no official cold storage support in restic; use this option at your
own risk.
Restic always stored all files on s3 using the specified `s3.storage-class`.
Now, restic will store metadata using a non-archive storage tier to avoid
problems when accessing a repository. To restore any data, it is still
necessary to manually warm up the required data beforehand.
https://github.com/restic/restic/issues/4583
https://github.com/restic/restic/pull/4584
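A hedged sketch of the selection logic, with hypothetical file-type strings and function name: when an archive tier such as GLACIER is configured, metadata objects fall back to a standard class so the repository stays directly readable.

```go
package main

import "fmt"

// storageClassFor picks the storage class for an object. Data files use the
// configured class; metadata (snapshots, indexes, config, keys) stays on a
// standard tier so the repository remains accessible without a restore step.
func storageClassFor(fileType, configured string) string {
	isArchive := configured == "GLACIER" || configured == "DEEP_ARCHIVE"
	isMetadata := fileType != "data"
	if isArchive && isMetadata {
		return "STANDARD"
	}
	return configured
}

func main() {
	for _, ft := range []string{"data", "snapshot", "index"} {
		fmt.Printf("%-8s -> %s\n", ft, storageClassFor(ft, "GLACIER"))
	}
}
```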

View File

@@ -0,0 +1,7 @@
Bugfix: Properly report the ID of newly added keys
`restic key add` now reports the ID of a newly added key. This simplifies
selecting a specific key using the `--key-hint key` option.
https://github.com/restic/restic/issues/4656
https://github.com/restic/restic/pull/4657

View File

@@ -0,0 +1,8 @@
Enhancement: Move key add, list, remove and passwd as separate sub-commands
Restic now provides usage documentation for the `key` command. Each sub-command
(`add`, `list`, `remove` and `passwd`) now has its own documentation, which can
be invoked using `restic key <add|list|remove|passwd> --help`.
https://github.com/restic/restic/issues/4676
https://github.com/restic/restic/pull/4685

View File

@@ -0,0 +1,8 @@
Enhancement: Add `--target` option to the `dump` command
Restic `dump` always printed to the standard output. It can now write the
output to a file specified via the new `--target` option.
https://github.com/restic/restic/issues/4678
https://github.com/restic/restic/pull/4682
https://github.com/restic/restic/pull/4692

View File

@@ -0,0 +1,7 @@
Bugfix: Correct hardlink handling in `stats` command
If files on different devices had the same inode id, then the `stats` command
did not correctly calculate the snapshot size. This has been fixed.
https://github.com/restic/restic/pull/4503
https://forum.restic.net/t/possible-bug-in-stats/6461/8

View File

@@ -0,0 +1,11 @@
Enhancement: Add bitrot detection to `diff` command
The output of the `diff` command now includes the modifier `?` to indicate
bitrot in backed-up files. It will appear whenever there is a
difference in content while the metadata is exactly the same. Since files with
unchanged metadata are normally not read again when creating a backup, the
detection is only effective if the right-hand side of the diff has been created
with "backup --force".
https://github.com/restic/restic/issues/805
https://github.com/restic/restic/pull/4526

View File

@@ -0,0 +1,5 @@
Enhancement: Add `--new-host` and `--new-time` options to `rewrite` command
`restic rewrite` now allows rewriting the host and / or time metadata of a snapshot.
https://github.com/restic/restic/pull/4573

View File

@@ -0,0 +1,7 @@
Enhancement: `mount` tests mountpoint existence before opening the repository
The restic `mount` command now checks for the existence of the
mountpoint before opening the repository, leading to quicker error
detection.
https://github.com/restic/restic/pull/4590

View File

@@ -0,0 +1,6 @@
Bugfix: `find` ignored directories in some cases
In some cases, the `find` command ignored empty or moved directories. This has
been fixed.
https://github.com/restic/restic/pull/4615

View File

@@ -0,0 +1,10 @@
Enhancement: Improve `repair packs` command
The `repair packs` command has been improved to also be able to process
truncated pack files. The `check --read-data` command will provide instructions
on using the command if necessary to repair a repository. See the guide at
https://restic.readthedocs.io/en/stable/077_troubleshooting.html for further
instructions.
https://github.com/restic/restic/pull/4644
https://github.com/restic/restic/pull/4655

View File

@@ -12,7 +12,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/spf13/cobra"
@@ -25,7 +24,6 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/backup"
"github.com/restic/restic/internal/ui/termstatus"
)
@@ -44,7 +42,7 @@ Exit status is 0 if the command was successful.
Exit status is 1 if there was a fatal error (no snapshot created).
Exit status is 3 if some source data could not be read (incomplete snapshot created).
`,
PreRun: func(cmd *cobra.Command, args []string) {
PreRun: func(_ *cobra.Command, _ []string) {
if backupOptions.Host == "" {
hostname, err := os.Hostname()
if err != nil {
@@ -56,31 +54,9 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
},
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
var wg sync.WaitGroup
cancelCtx, cancel := context.WithCancel(ctx)
defer func() {
// shutdown termstatus
cancel()
wg.Wait()
}()
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// use the terminal for stdout/stderr
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
defer func() {
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
}()
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return runBackup(ctx, backupOptions, globalOptions, term, args)
term, cancel := setupTermstatus()
defer cancel()
return runBackup(cmd.Context(), backupOptions, globalOptions, term, args)
},
}
@@ -97,6 +73,7 @@ type BackupOptions struct {
ExcludeLargerThan string
Stdin bool
StdinFilename string
StdinCommand bool
Tags restic.TagLists
Host string
FilesFrom []string
@@ -134,6 +111,7 @@ func init() {
f.StringVar(&backupOptions.ExcludeLargerThan, "exclude-larger-than", "", "max `size` of the files to be backed up (allowed suffixes: k/K, m/M, g/G, t/T)")
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "`filename` to use when reading from stdin")
f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout")
f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
@@ -148,7 +126,7 @@ func init() {
f.StringArrayVar(&backupOptions.FilesFromRaw, "files-from-raw", nil, "read the files to backup from `file` (can be combined with file args; can be specified multiple times)")
f.StringVar(&backupOptions.TimeStamp, "time", "", "`time` of the backup (ex. '2012-11-01 22:08:41') (default: now)")
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number and ctime changes when checking for modified files")
f.BoolVar(&backupOptions.IgnoreCtime, "ignore-ctime", false, "ignore ctime changes when checking for modified files")
f.BoolVarP(&backupOptions.DryRun, "dry-run", "n", false, "do not upload or write any data, just show what would be done")
f.BoolVar(&backupOptions.NoScan, "no-scan", false, "do not run scanner to estimate size of backup")
@@ -287,7 +265,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
}
}
if opts.Stdin {
if opts.Stdin || opts.StdinCommand {
if len(opts.FilesFrom) > 0 {
return errors.Fatal("--stdin and --files-from cannot be used together")
}
@@ -298,7 +276,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
return errors.Fatal("--stdin and --files-from-raw cannot be used together")
}
if len(args) > 0 {
if len(args) > 0 && !opts.StdinCommand {
return errors.Fatal("--stdin was specified and files/dirs were listed as arguments")
}
}
@@ -366,7 +344,7 @@ func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc,
// collectTargets returns a list of target files/dirs from several sources.
func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
if opts.Stdin {
if opts.Stdin || opts.StdinCommand {
return nil, nil
}
@@ -433,7 +411,7 @@ func collectTargets(opts BackupOptions, args []string) (targets []string, err er
// parent returns the ID of the parent snapshot. If there is none, nil is
// returned.
func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string, timeStampLimit time.Time) (*restic.Snapshot, error) {
func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, opts BackupOptions, targets []string, timeStampLimit time.Time) (*restic.Snapshot, error) {
if opts.Force {
return nil, nil
}
@@ -453,7 +431,7 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup
f.Tags = []restic.TagList{opts.Tags.Flatten()}
}
sn, _, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
sn, _, err := f.FindLatest(ctx, repo, repo, snName)
// Snapshot not found is ok if no explicit parent was set
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
err = nil
@@ -592,16 +570,24 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
defer localVss.DeleteSnapshots()
targetFS = localVss
}
if opts.Stdin {
if opts.Stdin || opts.StdinCommand {
if !gopts.JSON {
progressPrinter.V("read data from stdin")
}
filename := path.Join("/", opts.StdinFilename)
var source io.ReadCloser = os.Stdin
if opts.StdinCommand {
source, err = fs.NewCommandReader(ctx, args, globalOptions.stderr)
if err != nil {
return err
}
}
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: filename,
Mode: 0644,
ReadCloser: os.Stdin,
ReadCloser: source,
}
targets = []string{filename}
}
@@ -623,14 +609,20 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
wg.Go(func() error { return sc.Scan(cancelCtx, targets) })
}
arch := archiver.New(repo, targetFS, archiver.Options{ReadConcurrency: backupOptions.ReadConcurrency})
arch := archiver.New(repo, targetFS, archiver.Options{ReadConcurrency: opts.ReadConcurrency})
arch.SelectByName = selectByNameFilter
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
success := true
arch.Error = func(item string, err error) error {
success = false
return progressReporter.Error(item, err)
reterr := progressReporter.Error(item, err)
// If we receive a fatal error during the execution of the snapshot,
// we abort the snapshot.
if reterr == nil && errors.IsFatal(err) {
reterr = err
}
return reterr
}
arch.CompleteItem = progressReporter.CompleteItem
arch.StartFile = progressReporter.StartFile

View File

@@ -9,6 +9,7 @@ import (
"runtime"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
@@ -265,7 +266,7 @@ func TestBackupTreeLoadError(t *testing.T) {
// delete the subdirectory pack first
for id := range treePacks {
rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
}
testRunRebuildIndex(t, env.gopts)
// now the repo is missing the tree blob in the index; check should report this
@@ -567,3 +568,72 @@ func linkEqual(source, dest []string) bool {
return true
}
func TestStdinFromCommand(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testSetupBackupData(t, env)
opts := BackupOptions{
StdinCommand: true,
StdinFilename: "stdin",
}
testRunBackup(t, filepath.Dir(env.testdata), []string{"python", "-c", "import sys; print('something'); sys.exit(0)"}, opts, env.gopts)
testListSnapshots(t, env.gopts, 1)
testRunCheck(t, env.gopts)
}
func TestStdinFromCommandNoOutput(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testSetupBackupData(t, env)
opts := BackupOptions{
StdinCommand: true,
StdinFilename: "stdin",
}
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"python", "-c", "import sys; sys.exit(0)"}, opts, env.gopts)
rtest.Assert(t, err != nil && err.Error() == "at least one source file could not be read", "No data error expected")
testListSnapshots(t, env.gopts, 1)
testRunCheck(t, env.gopts)
}
func TestStdinFromCommandFailExitCode(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testSetupBackupData(t, env)
opts := BackupOptions{
StdinCommand: true,
StdinFilename: "stdin",
}
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"python", "-c", "import sys; print('test'); sys.exit(1)"}, opts, env.gopts)
rtest.Assert(t, err != nil, "Expected error while backing up")
testListSnapshots(t, env.gopts, 0)
testRunCheck(t, env.gopts)
}
func TestStdinFromCommandFailNoOutputAndExitCode(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testSetupBackupData(t, env)
opts := BackupOptions{
StdinCommand: true,
StdinFilename: "stdin",
}
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"python", "-c", "import sys; sys.exit(1)"}, opts, env.gopts)
rtest.Assert(t, err != nil, "Expected error while backing up")
testListSnapshots(t, env.gopts, 0)
testRunCheck(t, env.gopts)
}

View File

@@ -28,7 +28,7 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(_ *cobra.Command, args []string) error {
return runCache(cacheOptions, globalOptions, args)
},
}

View File

@@ -106,7 +106,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
Println(string(buf))
return nil
case "snapshot":
sn, _, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
sn, _, err := restic.FindSnapshot(ctx, repo, repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}
@@ -154,7 +154,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
return nil
case "pack":
h := restic.Handle{Type: restic.PackFile, Name: id.String()}
h := backend.Handle{Type: restic.PackFile, Name: id.String()}
buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h)
if err != nil {
return err
@@ -193,7 +193,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
return errors.Fatal("blob not found")
case "tree":
sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
sn, subfolder, err := restic.FindSnapshot(ctx, repo, repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}

View File

@@ -38,7 +38,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(cmd.Context(), checkOptions, globalOptions, args)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
PreRunE: func(_ *cobra.Command, _ []string) error {
return checkFlags(checkOptions)
},
}
@@ -336,20 +336,18 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
errorsFound = true
Warnf("%v\n", err)
if err, ok := err.(*checker.ErrPackData); ok {
if strings.Contains(err.Error(), "wrong data returned, hash is") {
salvagePacks = append(salvagePacks, err.PackID)
}
salvagePacks = append(salvagePacks, err.PackID)
}
}
p.Done()
if len(salvagePacks) > 0 {
Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n")
var strIds []string
Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n")
var strIDs []string
for _, id := range salvagePacks {
strIds = append(strIds, id.String())
strIDs = append(strIDs, id.String())
}
Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIds, " "))
Warnf("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " "))
Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n")
}
}
@@ -417,7 +415,7 @@ func selectPacksByBucket(allPacks map[restic.ID]int64, bucket, totalBuckets uint
return packs
}
// selectRandomPacksByPercentage selects the given percentage of packs which are randomly choosen.
// selectRandomPacksByPercentage selects the given percentage of packs which are randomly chosen.
func selectRandomPacksByPercentage(allPacks map[restic.ID]int64, percentage float64) map[restic.ID]int64 {
packCount := len(allPacks)
packsToCheck := int(float64(packCount) * (percentage / 100.0))

View File

@@ -71,7 +71,7 @@ func TestSelectPacksByBucket(t *testing.T) {
var testPacks = make(map[restic.ID]int64)
for i := 1; i <= 10; i++ {
id := restic.NewRandomID()
// ensure relevant part of generated id is reproducable
// ensure relevant part of generated id is reproducible
id[0] = byte(i)
testPacks[id] = 0
}
@@ -124,7 +124,7 @@ func TestSelectRandomPacksByPercentage(t *testing.T) {
}
func TestSelectNoRandomPacksByPercentage(t *testing.T) {
// that the a repository without pack files works
// that the repository without pack files works
var testPacks = make(map[restic.ID]int64)
selectedPacks := selectRandomPacksByPercentage(testPacks, 10.0)
rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs")
@@ -158,7 +158,7 @@ func TestSelectRandomPacksByFileSize(t *testing.T) {
}
func TestSelectNoRandomPacksByFileSize(t *testing.T) {
// that the a repository without pack files works
// that the repository without pack files works
var testPacks = make(map[restic.ID]int64)
selectedPacks := selectRandomPacksByFileSize(testPacks, 10, 500)
rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs")

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
@@ -88,12 +87,12 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
return err
}
srcSnapshotLister, err := backend.MemorizeList(ctx, srcRepo.Backend(), restic.SnapshotFile)
srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile)
if err != nil {
return err
}
dstSnapshotLister, err := backend.MemorizeList(ctx, dstRepo.Backend(), restic.SnapshotFile)
dstSnapshotLister, err := restic.MemorizeList(ctx, dstRepo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -127,11 +126,12 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
if sn.Original != nil {
srcOriginal = *sn.Original
}
if originalSns, ok := dstSnapshotByOriginal[srcOriginal]; ok {
isCopy := false
for _, originalSn := range originalSns {
if similarSnapshots(originalSn, sn) {
Verboseff("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verboseff("\n%v\n", sn)
Verboseff("skipping source snapshot %s, was already copied to snapshot %s\n", sn.ID().Str(), originalSn.ID().Str())
isCopy = true
break
@@ -141,7 +141,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
continue
}
}
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
Verbosef(" copy started, this may take a while...\n")
if err := copyTree(ctx, srcRepo, dstRepo, visitedTrees, *sn.Tree, gopts.Quiet); err != nil {
return err

View File

@@ -52,19 +52,23 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
},
}
var tryRepair bool
var repairByte bool
var extractPack bool
var reuploadBlobs bool
type DebugExamineOptions struct {
TryRepair bool
RepairByte bool
ExtractPack bool
ReuploadBlobs bool
}
var debugExamineOpts DebugExamineOptions
func init() {
cmdRoot.AddCommand(cmdDebug)
cmdDebug.AddCommand(cmdDebugDump)
cmdDebug.AddCommand(cmdDebugExamine)
cmdDebugExamine.Flags().BoolVar(&extractPack, "extract-pack", false, "write blobs to the current directory")
cmdDebugExamine.Flags().BoolVar(&reuploadBlobs, "reupload-blobs", false, "reupload blobs to the repository")
cmdDebugExamine.Flags().BoolVar(&tryRepair, "try-repair", false, "try to repair broken blobs with single bit flips")
cmdDebugExamine.Flags().BoolVar(&repairByte, "repair-byte", false, "try to repair broken blobs by trying bytes")
cmdDebugExamine.Flags().BoolVar(&debugExamineOpts.ExtractPack, "extract-pack", false, "write blobs to the current directory")
cmdDebugExamine.Flags().BoolVar(&debugExamineOpts.ReuploadBlobs, "reupload-blobs", false, "reupload blobs to the repository")
cmdDebugExamine.Flags().BoolVar(&debugExamineOpts.TryRepair, "try-repair", false, "try to repair broken blobs with single bit flips")
cmdDebugExamine.Flags().BoolVar(&debugExamineOpts.RepairByte, "repair-byte", false, "try to repair broken blobs by trying bytes")
}
func prettyPrintJSON(wr io.Writer, item interface{}) error {
@@ -78,7 +82,7 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error {
}
func debugPrintSnapshots(ctx context.Context, repo *repository.Repository, wr io.Writer) error {
return restic.ForAllSnapshots(ctx, repo.Backend(), repo, nil, func(id restic.ID, snapshot *restic.Snapshot, err error) error {
return restic.ForAllSnapshots(ctx, repo, repo, nil, func(id restic.ID, snapshot *restic.Snapshot, err error) error {
if err != nil {
return err
}
@@ -107,7 +111,7 @@ type Blob struct {
func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer) error {
var m sync.Mutex
return restic.ParallelList(ctx, repo.Backend(), restic.PackFile, repo.Connections(), func(ctx context.Context, id restic.ID, size int64) error {
return restic.ParallelList(ctx, repo, restic.PackFile, repo.Connections(), func(ctx context.Context, id restic.ID, size int64) error {
blobs, _, err := repo.ListPack(ctx, id, size)
if err != nil {
Warnf("error for pack %v: %v\n", id.Str(), err)
@@ -133,8 +137,8 @@ func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer)
})
}
func dumpIndexes(ctx context.Context, repo restic.Repository, wr io.Writer) error {
return index.ForAllIndexes(ctx, repo.Backend(), repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
func dumpIndexes(ctx context.Context, repo restic.ListerLoaderUnpacked, wr io.Writer) error {
return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
Printf("index_id: %v\n", id)
if err != nil {
return err
@@ -196,7 +200,7 @@ var cmdDebugExamine = &cobra.Command{
Short: "Examine a pack file",
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runDebugExamine(cmd.Context(), globalOptions, args)
return runDebugExamine(cmd.Context(), globalOptions, debugExamineOpts, args)
},
}
@@ -290,7 +294,7 @@ func tryRepairWithBitflip(ctx context.Context, key *crypto.Key, input []byte, by
})
err := wg.Wait()
if err != nil {
panic("all go rountines can only return nil")
panic("all go routines can only return nil")
}
if !found {
@@ -315,20 +319,20 @@ func decryptUnsigned(ctx context.Context, k *crypto.Key, buf []byte) []byte {
return out
}
func loadBlobs(ctx context.Context, repo restic.Repository, packID restic.ID, list []restic.Blob) error {
func loadBlobs(ctx context.Context, opts DebugExamineOptions, repo restic.Repository, packID restic.ID, list []restic.Blob) error {
dec, err := zstd.NewReader(nil)
if err != nil {
panic(err)
}
be := repo.Backend()
h := restic.Handle{
h := backend.Handle{
Name: packID.String(),
Type: restic.PackFile,
}
wg, ctx := errgroup.WithContext(ctx)
if reuploadBlobs {
if opts.ReuploadBlobs {
repo.StartPackUploader(ctx, wg)
}
@@ -356,8 +360,8 @@ func loadBlobs(ctx context.Context, repo restic.Repository, packID restic.ID, li
filePrefix := ""
if err != nil {
Warnf("error decrypting blob: %v\n", err)
if tryRepair || repairByte {
plaintext = tryRepairWithBitflip(ctx, key, buf, repairByte)
if opts.TryRepair || opts.RepairByte {
plaintext = tryRepairWithBitflip(ctx, key, buf, opts.RepairByte)
}
if plaintext != nil {
outputPrefix = "repaired "
@@ -391,13 +395,13 @@ func loadBlobs(ctx context.Context, repo restic.Repository, packID restic.ID, li
Printf(" successfully %vdecrypted blob (length %v), hash is %v, ID matches\n", outputPrefix, len(plaintext), id)
prefix = "correct-"
}
if extractPack {
if opts.ExtractPack {
err = storePlainBlob(id, filePrefix+prefix, plaintext)
if err != nil {
return err
}
}
if reuploadBlobs {
if opts.ReuploadBlobs {
_, _, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
if err != nil {
return err
@@ -406,7 +410,7 @@ func loadBlobs(ctx context.Context, repo restic.Repository, packID restic.ID, li
}
}
if reuploadBlobs {
if opts.ReuploadBlobs {
return repo.Flush(ctx)
}
return nil
@@ -437,7 +441,7 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error {
return nil
}
func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) error {
func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
@@ -447,7 +451,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
for _, name := range args {
id, err := restic.ParseID(name)
if err != nil {
id, err = restic.Find(ctx, repo.Backend(), restic.PackFile, name)
id, err = restic.Find(ctx, repo, restic.PackFile, name)
if err != nil {
Warnf("error: %v\n", err)
continue
@@ -476,7 +480,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
}
for _, id := range ids {
err := examinePack(ctx, repo, id)
err := examinePack(ctx, opts, repo, id)
if err != nil {
Warnf("error: %v\n", err)
}
@@ -487,10 +491,10 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
return nil
}
func examinePack(ctx context.Context, repo restic.Repository, id restic.ID) error {
func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repository, id restic.ID) error {
Printf("examine %v\n", id)
h := restic.Handle{
h := backend.Handle{
Type: restic.PackFile,
Name: id.String(),
}
@@ -524,7 +528,7 @@ func examinePack(ctx context.Context, repo restic.Repository, id restic.ID) erro
checkPackSize(blobs, fi.Size)
err = loadBlobs(ctx, repo, id, blobs)
err = loadBlobs(ctx, opts, repo, id, blobs)
if err != nil {
Warnf("error: %v\n", err)
} else {
@@ -542,7 +546,7 @@ func examinePack(ctx context.Context, repo restic.Repository, id restic.ID) erro
checkPackSize(blobs, fi.Size)
if !blobsLoaded {
return loadBlobs(ctx, repo, id, blobs)
return loadBlobs(ctx, opts, repo, id, blobs)
}
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"reflect"
"sort"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -28,6 +27,10 @@ directory:
* U The metadata (access mode, timestamps, ...) for the item was updated
* M The file's content was modified
* T The type was changed, e.g. a file was made a symlink
* ? Bitrot detected: The file's content has changed but all metadata is the same
Metadata comparison will likely not work if a backup was created using the
'--ignore-inode' or '--ignore-ctime' option.
To only compare files in specific subfolders, you can use the
"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
@@ -58,7 +61,7 @@ func init() {
f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
}
func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, string, error) {
func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.LoaderUnpacked, desc string) (*restic.Snapshot, string, error) {
sn, subfolder, err := restic.FindSnapshot(ctx, be, repo, desc)
if err != nil {
return nil, "", errors.Fatal(err.Error())
@@ -68,7 +71,7 @@ func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository,
// Comparer collects all things needed to compare two snapshots.
type Comparer struct {
repo restic.Repository
repo restic.BlobLoader
opts DiffOptions
printChange func(change *Change)
}
@@ -144,7 +147,7 @@ type DiffStatsContainer struct {
}
// updateBlobs updates the blob counters in the stats struct.
func updateBlobs(repo restic.Repository, blobs restic.BlobSet, stats *DiffStat) {
func updateBlobs(repo restic.Loader, blobs restic.BlobSet, stats *DiffStat) {
for h := range blobs {
switch h.Type {
case restic.DataBlob:
@@ -273,6 +276,16 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref
!reflect.DeepEqual(node1.Content, node2.Content) {
mod += "M"
stats.ChangedFiles++
node1NilContent := *node1
node2NilContent := *node2
node1NilContent.Content = nil
node2NilContent.Content = nil
// the bitrot detection may not work if `backup --ignore-inode` or `--ignore-ctime` were used
if node1NilContent.Equals(node2NilContent) {
// probable bitrot detected
mod += "?"
}
} else if c.opts.ShowMetadata && !node1.Equals(*node2) {
mod += "U"
}
@@ -346,7 +359,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
}
// cache snapshots listing
be, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -388,7 +401,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
c := &Comparer{
repo: repo,
opts: diffOptions,
opts: opts,
printChange: func(change *Change) {
Printf("%-5s%v\n", change.Modifier, change.Path)
},
@@ -405,7 +418,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
}
if gopts.Quiet {
c.printChange = func(change *Change) {}
c.printChange = func(_ *Change) {}
}
stats := &DiffStatsContainer{

View File

@@ -46,6 +46,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
type DumpOptions struct {
restic.SnapshotFilter
Archive string
Target string
}
var dumpOptions DumpOptions
@@ -56,6 +57,7 @@ func init() {
flags := cmdDump.Flags()
initSingleSnapshotFilter(flags, &dumpOptions.SnapshotFilter)
flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"")
flags.StringVarP(&dumpOptions.Target, "target", "t", "", "write the output to target `path`")
}
func splitPath(p string) []string {
@@ -67,11 +69,11 @@ func splitPath(p string) []string {
return append(s, f)
}
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string, d *dump.Dumper) error {
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoader, prefix string, pathComponents []string, d *dump.Dumper, canWriteArchiveFunc func() error) error {
// If we print / we need to assume that there are multiple nodes at that
// level in the tree.
if pathComponents[0] == "" {
if err := checkStdoutArchive(); err != nil {
if err := canWriteArchiveFunc(); err != nil {
return err
}
return d.DumpTree(ctx, tree, "/")
@@ -91,9 +93,9 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repositor
if err != nil {
return errors.Wrapf(err, "cannot load subtree for %q", item)
}
return printFromTree(ctx, subtree, repo, item, pathComponents[1:], d)
return printFromTree(ctx, subtree, repo, item, pathComponents[1:], d, canWriteArchiveFunc)
case dump.IsDir(node):
if err := checkStdoutArchive(); err != nil {
if err := canWriteArchiveFunc(); err != nil {
return err
}
subtree, err := restic.LoadTree(ctx, repo, *node.Subtree)
@@ -147,7 +149,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
}).FindLatest(ctx, repo.Backend(), repo, snapshotIDString)
}).FindLatest(ctx, repo, repo, snapshotIDString)
if err != nil {
return errors.Fatalf("failed to find snapshot: %v", err)
}
@@ -168,8 +170,24 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
}
d := dump.New(opts.Archive, repo, os.Stdout)
err = printFromTree(ctx, tree, repo, "/", splittedPath, d)
outputFileWriter := os.Stdout
canWriteArchiveFunc := checkStdoutArchive
if opts.Target != "" {
file, err := os.Create(opts.Target)
if err != nil {
return fmt.Errorf("cannot dump to file: %w", err)
}
defer func() {
_ = file.Close()
}()
outputFileWriter = file
canWriteArchiveFunc = func() error { return nil }
}
d := dump.New(opts.Archive, repo, outputFileWriter)
err = printFromTree(ctx, tree, repo, "/", splittedPath, d, canWriteArchiveFunc)
if err != nil {
return errors.Fatalf("cannot dump file: %v", err)
}
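
The dump changes above route the archive either to stdout, guarded by checkStdoutArchive, or, when --target is given, to a freshly created file with the guard disabled. A small self-contained sketch of that selection pattern; selectOutput, the stdoutIsTerminal parameter, and the guard message are illustrative names, not restic's API:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// selectOutput mirrors the pattern above: without a target the archive goes
// to stdout behind a guard, with a target the output is a newly created file
// and the guard becomes a no-op.
func selectOutput(target string, stdoutIsTerminal bool) (io.Writer, func() error, error) {
	if target == "" {
		guard := func() error {
			if stdoutIsTerminal {
				return errors.New("refusing to write archive data to a terminal")
			}
			return nil
		}
		return os.Stdout, guard, nil
	}
	f, err := os.Create(target)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot dump to file: %w", err)
	}
	return f, func() error { return nil }, nil
}

func main() {
	out, canWriteArchive, err := selectOutput("", false) // "" behaves like dump without --target
	if err != nil {
		panic(err)
	}
	if err := canWriteArchive(); err == nil {
		fmt.Fprintln(out, "archive bytes would go here")
	}
}
```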

View File

@@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
@@ -245,13 +244,12 @@ func (s *statefulOutput) Finish() {
// Finder bundles information needed to find a file or directory.
type Finder struct {
repo restic.Repository
pat findPattern
out statefulOutput
ignoreTrees restic.IDSet
blobIDs map[string]struct{}
treeIDs map[string]struct{}
itemsFound int
repo restic.Repository
pat findPattern
out statefulOutput
blobIDs map[string]struct{}
treeIDs map[string]struct{}
itemsFound int
}
func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error {
@@ -262,17 +260,17 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
}
f.out.newsn = sn
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
return walker.Walk(ctx, f.repo, *sn.Tree, walker.WalkVisitor{ProcessNode: func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) error {
if err != nil {
debug.Log("Error loading tree %v: %v", parentTreeID, err)
Printf("Unable to load tree %s\n ... which belongs to snapshot %s\n", parentTreeID, sn.ID())
return false, walker.ErrSkipNode
return walker.ErrSkipNode
}
if node == nil {
return false, nil
return nil
}
normalizedNodepath := nodepath
@@ -285,7 +283,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
for _, pat := range f.pat.pattern {
found, err := filter.Match(pat, normalizedNodepath)
if err != nil {
return false, err
return err
}
if found {
foundMatch = true
@@ -293,16 +291,13 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
}
}
var (
ignoreIfNoMatch = true
errIfNoMatch error
)
var errIfNoMatch error
if node.Type == "dir" {
var childMayMatch bool
for _, pat := range f.pat.pattern {
mayMatch, err := filter.ChildMatch(pat, normalizedNodepath)
if err != nil {
return false, err
return err
}
if mayMatch {
childMayMatch = true
@@ -311,31 +306,28 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
}
if !childMayMatch {
ignoreIfNoMatch = true
errIfNoMatch = walker.ErrSkipNode
} else {
ignoreIfNoMatch = false
}
}
if !foundMatch {
return ignoreIfNoMatch, errIfNoMatch
return errIfNoMatch
}
if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) {
debug.Log(" ModTime is older than %s\n", f.pat.oldest)
return ignoreIfNoMatch, errIfNoMatch
return errIfNoMatch
}
if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) {
debug.Log(" ModTime is newer than %s\n", f.pat.newest)
return ignoreIfNoMatch, errIfNoMatch
return errIfNoMatch
}
debug.Log(" found match\n")
f.out.PrintPattern(nodepath, node)
return false, nil
})
return nil
}})
}
func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
@@ -346,17 +338,17 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
}
f.out.newsn = sn
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
return walker.Walk(ctx, f.repo, *sn.Tree, walker.WalkVisitor{ProcessNode: func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) error {
if err != nil {
debug.Log("Error loading tree %v: %v", parentTreeID, err)
Printf("Unable to load tree %s\n ... which belongs to snapshot %s\n", parentTreeID, sn.ID())
return false, walker.ErrSkipNode
return walker.ErrSkipNode
}
if node == nil {
return false, nil
return nil
}
if node.Type == "dir" && f.treeIDs != nil {
@@ -374,7 +366,7 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
// looking for blobs)
if f.itemsFound >= len(f.treeIDs) && f.blobIDs == nil {
// Return an error to terminate the Walk
return true, errors.New("OK")
return errors.New("OK")
}
}
}
@@ -395,8 +387,8 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
}
}
return false, nil
})
return nil
}})
}
var errAllPacksFound = errors.New("all packs found")
@@ -584,7 +576,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
}
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -594,10 +586,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
}
f := &Finder{
repo: repo,
pat: pat,
out: statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
ignoreTrees: restic.NewIDSet(),
repo: repo,
pat: pat,
out: statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
}
if opts.BlobID {
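
The find hunks above migrate from a walk callback returning (bool, error) to walker.WalkVisitor, where ProcessNode returns a plain error and skipping a subtree is signalled by returning walker.ErrSkipNode. The toy walker below illustrates that control flow; the tree, visitor, and sentinel are simplified stand-ins for the internal walker package:

```go
package main

import (
	"errors"
	"fmt"
)

// errSkipNode plays the role of walker.ErrSkipNode: a sentinel telling the
// walker not to descend into the current directory without aborting the walk.
var errSkipNode = errors.New("skip this node")

type node struct {
	name     string
	children []*node
}

// walkVisitor is a simplified stand-in for walker.WalkVisitor.
type walkVisitor struct {
	ProcessNode func(path string, n *node) error
	LeaveDir    func(path string)
}

func walk(prefix string, n *node, v walkVisitor) error {
	p := prefix + "/" + n.name
	err := v.ProcessNode(p, n)
	if errors.Is(err, errSkipNode) {
		return nil // skip this subtree, keep walking elsewhere
	}
	if err != nil {
		return err
	}
	for _, c := range n.children {
		if err := walk(p, c, v); err != nil {
			return err
		}
	}
	if len(n.children) > 0 && v.LeaveDir != nil {
		v.LeaveDir(p)
	}
	return nil
}

func main() {
	tree := &node{name: "home", children: []*node{
		{name: "keep", children: []*node{{name: "file.txt"}}},
		{name: "skip", children: []*node{{name: "hidden"}}},
	}}
	_ = walk("", tree, walkVisitor{
		ProcessNode: func(path string, n *node) error {
			if n.name == "skip" {
				return errSkipNode
			}
			fmt.Println("visit", path)
			return nil
		},
		LeaveDir: func(path string) { fmt.Println("leave", path) },
	})
}
```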

View File

@@ -33,7 +33,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runForget(cmd.Context(), forgetOptions, globalOptions, args)
return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args)
},
}
@@ -98,6 +98,7 @@ type ForgetOptions struct {
}
var forgetOptions ForgetOptions
var forgetPruneOptions PruneOptions
func init() {
cmdRoot.AddCommand(cmdForget)
@@ -132,7 +133,7 @@ func init() {
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
f.SortFlags = false
addPruneOptions(cmdForget)
addPruneOptions(cmdForget, &forgetPruneOptions)
}
func verifyForgetOptions(opts *ForgetOptions) error {
@@ -151,7 +152,7 @@ func verifyForgetOptions(opts *ForgetOptions) error {
return nil
}
func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error {
err := verifyForgetOptions(&opts)
if err != nil {
return err
@@ -183,7 +184,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
var snapshots restic.Snapshots
removeSnIDs := restic.NewIDSet()
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
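
With the change above, addPruneOptions takes a pointer to the PruneOptions it should bind flags to, so `forget --prune` and `prune` each keep their own option set while sharing the flag definitions. A hedged sketch of that pattern with cobra; only a single illustrative flag is modelled:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// pruneOpts is a stand-in for PruneOptions; only MaxUnused is modelled.
type pruneOpts struct {
	MaxUnused string
}

// addPruneFlags mirrors the new addPruneOptions signature: the caller passes
// in the struct the flags should be bound to, so two commands can share the
// flag definitions without sharing state.
func addPruneFlags(c *cobra.Command, opts *pruneOpts) {
	c.Flags().StringVar(&opts.MaxUnused, "max-unused", "5%", "tolerate given limit of unused data")
}

func main() {
	var forgetPrune, prune pruneOpts

	forgetCmd := &cobra.Command{Use: "forget", RunE: func(*cobra.Command, []string) error {
		fmt.Println("forget would prune with max-unused =", forgetPrune.MaxUnused)
		return nil
	}}
	pruneCmd := &cobra.Command{Use: "prune", RunE: func(*cobra.Command, []string) error {
		fmt.Println("prune with max-unused =", prune.MaxUnused)
		return nil
	}}

	addPruneFlags(forgetCmd, &forgetPrune)
	addPruneFlags(pruneCmd, &prune)

	forgetCmd.SetArgs([]string{"--max-unused", "10%"})
	_ = forgetCmd.Execute()
}
```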

View File

@@ -9,5 +9,8 @@ import (
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
opts := ForgetOptions{}
rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
pruneOpts := PruneOptions{
MaxUnused: "5%",
}
rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args))
}

View File

@@ -21,7 +21,9 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: runGenerate,
RunE: func(_ *cobra.Command, args []string) error {
return runGenerate(genOpts, args)
},
}
type generateOptions struct {
@@ -90,48 +92,48 @@ func writePowerShellCompletion(file string) error {
return cmdRoot.GenPowerShellCompletionFile(file)
}
func runGenerate(_ *cobra.Command, args []string) error {
func runGenerate(opts generateOptions, args []string) error {
if len(args) > 0 {
return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
}
if genOpts.ManDir != "" {
err := writeManpages(genOpts.ManDir)
if opts.ManDir != "" {
err := writeManpages(opts.ManDir)
if err != nil {
return err
}
}
if genOpts.BashCompletionFile != "" {
err := writeBashCompletion(genOpts.BashCompletionFile)
if opts.BashCompletionFile != "" {
err := writeBashCompletion(opts.BashCompletionFile)
if err != nil {
return err
}
}
if genOpts.FishCompletionFile != "" {
err := writeFishCompletion(genOpts.FishCompletionFile)
if opts.FishCompletionFile != "" {
err := writeFishCompletion(opts.FishCompletionFile)
if err != nil {
return err
}
}
if genOpts.ZSHCompletionFile != "" {
err := writeZSHCompletion(genOpts.ZSHCompletionFile)
if opts.ZSHCompletionFile != "" {
err := writeZSHCompletion(opts.ZSHCompletionFile)
if err != nil {
return err
}
}
if genOpts.PowerShellCompletionFile != "" {
err := writePowerShellCompletion(genOpts.PowerShellCompletionFile)
if opts.PowerShellCompletionFile != "" {
err := writePowerShellCompletion(opts.PowerShellCompletionFile)
if err != nil {
return err
}
}
var empty generateOptions
if genOpts == empty {
if opts == empty {
return errors.Fatal("nothing to do, please specify at least one output file/dir")
}

View File

@@ -75,7 +75,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
return err
}
repo, err := ReadRepo(gopts)
gopts.Repo, err = ReadRepo(gopts)
if err != nil {
return err
}
@@ -87,7 +87,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
return err
}
be, err := create(ctx, repo, gopts, gopts.extended)
be, err := create(ctx, gopts.Repo, gopts, gopts.extended)
if err != nil {
return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
}

View File

@@ -1,265 +1,18 @@
package main
import (
"context"
"encoding/json"
"os"
"strings"
"sync"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/table"
"github.com/spf13/cobra"
)
var cmdKey = &cobra.Command{
Use: "key [flags] [list|add|remove|passwd] [ID]",
Use: "key",
Short: "Manage keys (passwords)",
Long: `
The "key" command manages keys (passwords) for accessing the repository.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKey(cmd.Context(), globalOptions, args)
},
The "key" command allows you to set multiple access keys or passwords
per repository.
`,
}
var (
newPasswordFile string
keyUsername string
keyHostname string
)
func init() {
cmdRoot.AddCommand(cmdKey)
flags := cmdKey.Flags()
flags.StringVarP(&newPasswordFile, "new-password-file", "", "", "`file` from which to read the new password")
flags.StringVarP(&keyUsername, "user", "", "", "the username for new keys")
flags.StringVarP(&keyHostname, "host", "", "", "the hostname for new keys")
}
func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {
type keyInfo struct {
Current bool `json:"current"`
ID string `json:"id"`
UserName string `json:"userName"`
HostName string `json:"hostName"`
Created string `json:"created"`
}
var m sync.Mutex
var keys []keyInfo
err := restic.ParallelList(ctx, s.Backend(), restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, size int64) error {
k, err := repository.LoadKey(ctx, s, id)
if err != nil {
Warnf("LoadKey() failed: %v\n", err)
return nil
}
key := keyInfo{
Current: id == s.KeyID(),
ID: id.Str(),
UserName: k.Username,
HostName: k.Hostname,
Created: k.Created.Local().Format(TimeFormat),
}
m.Lock()
defer m.Unlock()
keys = append(keys, key)
return nil
})
if err != nil {
return err
}
if gopts.JSON {
return json.NewEncoder(globalOptions.stdout).Encode(keys)
}
tab := table.New()
tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}")
tab.AddColumn("User", "{{ .UserName }}")
tab.AddColumn("Host", "{{ .HostName }}")
tab.AddColumn("Created", "{{ .Created }}")
for _, key := range keys {
tab.AddRow(key)
}
return tab.Write(globalOptions.stdout)
}
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string
func getNewPassword(gopts GlobalOptions) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}
if newPasswordFile != "" {
return loadPasswordFromFile(newPasswordFile)
}
// Since we already have an open repository, temporary remove the password
// to prompt the user for the passwd.
newopts := gopts
newopts.password = ""
return ReadPasswordTwice(newopts,
"enter new password: ",
"enter password again: ")
}
func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}
id, err := repository.AddKey(ctx, repo, pw, keyUsername, keyHostname, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
err = switchToNewKeyAndRemoveIfBroken(ctx, repo, id, pw)
if err != nil {
return err
}
Verbosef("saved new key as %s\n", id)
return nil
}
func deleteKey(ctx context.Context, repo *repository.Repository, id restic.ID) error {
if id == repo.KeyID() {
return errors.Fatal("refusing to remove key currently used to access repository")
}
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
err := repo.Backend().Remove(ctx, h)
if err != nil {
return err
}
Verbosef("removed key %v\n", id)
return nil
}
func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}
id, err := repository.AddKey(ctx, repo, pw, "", "", repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
oldID := repo.KeyID()
err = switchToNewKeyAndRemoveIfBroken(ctx, repo, id, pw)
if err != nil {
return err
}
h := restic.Handle{Type: restic.KeyFile, Name: oldID.String()}
err = repo.Backend().Remove(ctx, h)
if err != nil {
return err
}
Verbosef("saved new key as %s\n", id)
return nil
}
func switchToNewKeyAndRemoveIfBroken(ctx context.Context, repo *repository.Repository, key *repository.Key, pw string) error {
// Verify new key to make sure it really works. A broken key can render the
// whole repository inaccessible
err := repo.SearchKey(ctx, pw, 0, key.ID().String())
if err != nil {
// the key is invalid, try to remove it
h := restic.Handle{Type: restic.KeyFile, Name: key.ID().String()}
_ = repo.Backend().Remove(ctx, h)
return errors.Fatalf("failed to access repository with new key: %v", err)
}
return nil
}
func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] == "remove" && len(args) != 2) || (args[0] != "remove" && len(args) != 1) {
return errors.Fatal("wrong number of arguments")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
switch args[0] {
case "list":
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
return listKeys(ctx, repo, gopts)
case "add":
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return addKey(ctx, repo, gopts)
case "remove":
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
id, err := restic.Find(ctx, repo.Backend(), restic.KeyFile, args[1])
if err != nil {
return err
}
return deleteKey(ctx, repo, id)
case "passwd":
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return changePassword(ctx, repo, gopts)
}
return nil
}
func loadPasswordFromFile(pwdFile string) (string, error) {
s, err := os.ReadFile(pwdFile)
if os.IsNotExist(err) {
return "", errors.Fatalf("%s does not exist", pwdFile)
}
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
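
The diff above trims cmd_key.go down to just the parent "key" command; the list/add/remove/passwd logic moves into the dedicated sub-command files that follow, each registered on the parent and validating its own arguments (as exercised by TestKeyCommandInvalidArguments further down). A compact sketch of that restructuring; the commands and messages are illustrative, not the actual restic implementations:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Instead of one RunE that switches on args[0] ("list", "add", "remove",
// "passwd"), each action becomes its own cobra sub-command registered on the
// parent command.
func main() {
	parent := &cobra.Command{Use: "key", Short: "Manage keys (passwords)"}

	list := &cobra.Command{
		Use:   "list",
		Short: "List keys",
		RunE: func(_ *cobra.Command, args []string) error {
			if len(args) > 0 {
				return fmt.Errorf("the list sub-command expects no arguments")
			}
			fmt.Println("would list keys here")
			return nil
		},
	}
	remove := &cobra.Command{
		Use:   "remove [ID]",
		Short: "Remove a key",
		RunE: func(_ *cobra.Command, args []string) error {
			if len(args) != 1 {
				return fmt.Errorf("remove expects one argument as the key id")
			}
			fmt.Println("would remove key", args[0])
			return nil
		},
	}

	parent.AddCommand(list, remove)
	parent.SetArgs([]string{"remove", "abcdef12"})
	_ = parent.Execute()
}
```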

cmd/restic/cmd_key_add.go Normal file
View File

@@ -0,0 +1,128 @@
package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/spf13/cobra"
)
var cmdKeyAdd = &cobra.Command{
Use: "add",
Short: "Add a new key (password) to the repository; returns the new key ID",
Long: `
The "add" sub-command creates a new key and validates the key. Returns the new key ID.
EXIT STATUS
===========
Exit status is 0 if the command is successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKeyAdd(cmd.Context(), globalOptions, keyAddOpts, args)
},
}
type KeyAddOptions struct {
NewPasswordFile string
Username string
Hostname string
}
var keyAddOpts KeyAddOptions
func init() {
cmdKey.AddCommand(cmdKeyAdd)
flags := cmdKeyAdd.Flags()
flags.StringVarP(&keyAddOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password")
flags.StringVarP(&keyAddOpts.Username, "user", "", "", "the username for new key")
flags.StringVarP(&keyAddOpts.Hostname, "host", "", "", "the hostname for new key")
}
func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, args []string) error {
if len(args) > 0 {
return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return addKey(ctx, repo, gopts, opts)
}
func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error {
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
if err != nil {
return err
}
id, err := repository.AddKey(ctx, repo, pw, opts.Username, opts.Hostname, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
err = switchToNewKeyAndRemoveIfBroken(ctx, repo, id, pw)
if err != nil {
return err
}
Verbosef("saved new key with ID %s\n", id.ID())
return nil
}
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string
func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}
if newPasswordFile != "" {
return loadPasswordFromFile(newPasswordFile)
}
// Since we already have an open repository, temporarily remove the password
// to prompt the user for the password.
newopts := gopts
newopts.password = ""
return ReadPasswordTwice(newopts,
"enter new password: ",
"enter password again: ")
}
func loadPasswordFromFile(pwdFile string) (string, error) {
s, err := os.ReadFile(pwdFile)
if os.IsNotExist(err) {
return "", errors.Fatalf("%s does not exist", pwdFile)
}
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
func switchToNewKeyAndRemoveIfBroken(ctx context.Context, repo *repository.Repository, key *repository.Key, pw string) error {
// Verify new key to make sure it really works. A broken key can render the
// whole repository inaccessible
err := repo.SearchKey(ctx, pw, 0, key.ID().String())
if err != nil {
// the key is invalid, try to remove it
_ = repository.RemoveKey(ctx, repo, key.ID())
return errors.Fatalf("failed to access repository with new key: %v", err)
}
return nil
}
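
switchToNewKeyAndRemoveIfBroken above keeps the repository safe by verifying a freshly added key before it is relied on, and deleting it again if verification fails. The sketch below captures that add-verify-rollback flow with a plain map standing in for the key files; all names are illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// keyring is an illustrative stand-in for the key files in a repository.
type keyring map[string]string // key ID -> password

// addAndVerify adds a key, verifies it can actually open the repository, and
// removes it again if verification fails, so a broken key never sticks
// around. verify stands in for the repository's key check.
func addAndVerify(keys keyring, id, password string, verify func(id, pw string) error) error {
	keys[id] = password
	if err := verify(id, password); err != nil {
		delete(keys, id) // the key is invalid, try to remove it
		return fmt.Errorf("failed to access repository with new key: %w", err)
	}
	return nil
}

func main() {
	keys := keyring{}
	err := addAndVerify(keys, "beef1234", "", func(_, pw string) error {
		if pw == "" {
			return errors.New("empty password")
		}
		return nil
	})
	fmt.Println(err)       // verification failed, key was rolled back
	fmt.Println(len(keys)) // 0
}
```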

View File

@@ -4,16 +4,17 @@ import (
"bufio"
"context"
"regexp"
"strings"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
buf, err := withCaptureStdout(func() error {
return runKey(context.TODO(), gopts, []string{"list"})
return runKeyList(context.TODO(), gopts, []string{})
})
rtest.OK(t, err)
@@ -36,21 +37,20 @@ func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions)
testKeyNewPassword = ""
}()
rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
rtest.OK(t, runKeyAdd(context.TODO(), gopts, KeyAddOptions{}, []string{}))
}
func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
testKeyNewPassword = "john's geheimnis"
defer func() {
testKeyNewPassword = ""
keyUsername = ""
keyHostname = ""
}()
rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))
t.Log("adding key for john@example.com")
rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
rtest.OK(t, runKeyAdd(context.TODO(), gopts, KeyAddOptions{
Username: "john",
Hostname: "example.com",
}, []string{}))
repo, err := OpenRepository(context.TODO(), gopts)
rtest.OK(t, err)
@@ -67,13 +67,13 @@ func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
testKeyNewPassword = ""
}()
rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
rtest.OK(t, runKeyPasswd(context.TODO(), gopts, KeyPasswdOptions{}, []string{}))
}
func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
t.Logf("remove %d keys: %q\n", len(IDs), IDs)
for _, id := range IDs {
rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
rtest.OK(t, runKeyRemove(context.TODO(), gopts, []string{id}))
}
}
@@ -103,18 +103,18 @@ func TestKeyAddRemove(t *testing.T) {
env.gopts.password = passwordList[len(passwordList)-1]
t.Logf("testing access with last password %q\n", env.gopts.password)
rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
rtest.OK(t, runKeyList(context.TODO(), env.gopts, []string{}))
testRunCheck(t, env.gopts)
testRunKeyAddNewKeyUserHost(t, env.gopts)
}
type emptySaveBackend struct {
restic.Backend
backend.Backend
}
func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
func (b *emptySaveBackend) Save(ctx context.Context, h backend.Handle, _ backend.RewindReader) error {
return b.Backend.Save(ctx, h, backend.NewByteReader([]byte{}, nil))
}
func TestKeyProblems(t *testing.T) {
@@ -122,7 +122,7 @@ func TestKeyProblems(t *testing.T) {
defer cleanup()
testRunInit(t, env.gopts)
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
return &emptySaveBackend{r}, nil
}
@@ -131,15 +131,45 @@ func TestKeyProblems(t *testing.T) {
testKeyNewPassword = ""
}()
err := runKey(context.TODO(), env.gopts, []string{"passwd"})
err := runKeyPasswd(context.TODO(), env.gopts, KeyPasswdOptions{}, []string{})
t.Log(err)
rtest.Assert(t, err != nil, "expected passwd change to fail")
err = runKey(context.TODO(), env.gopts, []string{"add"})
err = runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{}, []string{})
t.Log(err)
rtest.Assert(t, err != nil, "expected key adding to fail")
t.Logf("testing access with initial password %q\n", env.gopts.password)
rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
rtest.OK(t, runKeyList(context.TODO(), env.gopts, []string{}))
testRunCheck(t, env.gopts)
}
func TestKeyCommandInvalidArguments(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testRunInit(t, env.gopts)
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
return &emptySaveBackend{r}, nil
}
err := runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{}, []string{"johndoe"})
t.Log(err)
rtest.Assert(t, err != nil && strings.Contains(err.Error(), "no arguments"), "unexpected error for key add: %v", err)
err = runKeyPasswd(context.TODO(), env.gopts, KeyPasswdOptions{}, []string{"johndoe"})
t.Log(err)
rtest.Assert(t, err != nil && strings.Contains(err.Error(), "no arguments"), "unexpected error for key passwd: %v", err)
err = runKeyList(context.TODO(), env.gopts, []string{"johndoe"})
t.Log(err)
rtest.Assert(t, err != nil && strings.Contains(err.Error(), "no arguments"), "unexpected error for key list: %v", err)
err = runKeyRemove(context.TODO(), env.gopts, []string{})
t.Log(err)
rtest.Assert(t, err != nil && strings.Contains(err.Error(), "one argument"), "unexpected error for key remove: %v", err)
err = runKeyRemove(context.TODO(), env.gopts, []string{"john", "doe"})
t.Log(err)
rtest.Assert(t, err != nil && strings.Contains(err.Error(), "one argument"), "unexpected error for key remove: %v", err)
}

cmd/restic/cmd_key_list.go Normal file
View File

@@ -0,0 +1,112 @@
package main
import (
"context"
"encoding/json"
"fmt"
"sync"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/table"
"github.com/spf13/cobra"
)
var cmdKeyList = &cobra.Command{
Use: "list",
Short: "List keys (passwords)",
Long: `
The "list" sub-command lists all the keys (passwords) associated with the repository.
Returns the key ID, username, hostname, created time and if it's the current key being
used to access the repository.
EXIT STATUS
===========
Exit status is 0 if the command is successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKeyList(cmd.Context(), globalOptions, args)
},
}
func init() {
cmdKey.AddCommand(cmdKeyList)
}
func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error {
if len(args) > 0 {
return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
return listKeys(ctx, repo, gopts)
}
func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {
type keyInfo struct {
Current bool `json:"current"`
ID string `json:"id"`
UserName string `json:"userName"`
HostName string `json:"hostName"`
Created string `json:"created"`
}
var m sync.Mutex
var keys []keyInfo
err := restic.ParallelList(ctx, s, restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, _ int64) error {
k, err := repository.LoadKey(ctx, s, id)
if err != nil {
Warnf("LoadKey() failed: %v\n", err)
return nil
}
key := keyInfo{
Current: id == s.KeyID(),
ID: id.Str(),
UserName: k.Username,
HostName: k.Hostname,
Created: k.Created.Local().Format(TimeFormat),
}
m.Lock()
defer m.Unlock()
keys = append(keys, key)
return nil
})
if err != nil {
return err
}
if gopts.JSON {
return json.NewEncoder(globalOptions.stdout).Encode(keys)
}
tab := table.New()
tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}")
tab.AddColumn("User", "{{ .UserName }}")
tab.AddColumn("Host", "{{ .HostName }}")
tab.AddColumn("Created", "{{ .Created }}")
for _, key := range keys {
tab.AddRow(key)
}
return tab.Write(globalOptions.stdout)
}

View File

@@ -0,0 +1,89 @@
package main
import (
"context"
"fmt"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/spf13/cobra"
)
var cmdKeyPasswd = &cobra.Command{
Use: "passwd",
Short: "Change key (password); creates a new key ID and removes the old key ID, returns new key ID",
Long: `
The "passwd" sub-command creates a new key, validates the key and removes the old key ID.
Returns the new key ID.
EXIT STATUS
===========
Exit status is 0 if the command is successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKeyPasswd(cmd.Context(), globalOptions, keyPasswdOpts, args)
},
}
type KeyPasswdOptions struct {
KeyAddOptions
}
var keyPasswdOpts KeyPasswdOptions
func init() {
cmdKey.AddCommand(cmdKeyPasswd)
flags := cmdKeyPasswd.Flags()
flags.StringVarP(&keyPasswdOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password")
flags.StringVarP(&keyPasswdOpts.Username, "user", "", "", "the username for new key")
flags.StringVarP(&keyPasswdOpts.Hostname, "host", "", "", "the hostname for new key")
}
func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOptions, args []string) error {
if len(args) > 0 {
return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return changePassword(ctx, repo, gopts, opts)
}
func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error {
pw, err := getNewPassword(gopts, opts.NewPasswordFile)
if err != nil {
return err
}
id, err := repository.AddKey(ctx, repo, pw, "", "", repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
oldID := repo.KeyID()
err = switchToNewKeyAndRemoveIfBroken(ctx, repo, id, pw)
if err != nil {
return err
}
err = repository.RemoveKey(ctx, repo, oldID)
if err != nil {
return err
}
Verbosef("saved new key as %s\n", id)
return nil
}

View File

@@ -0,0 +1,73 @@
package main
import (
"context"
"fmt"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
)
var cmdKeyRemove = &cobra.Command{
Use: "remove [ID]",
Short: "Remove key ID (password) from the repository.",
Long: `
The "remove" sub-command removes the selected key ID. The "remove" command does not allow
removing the current key being used to access the repository.
EXIT STATUS
===========
Exit status is 0 if the command is successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runKeyRemove(cmd.Context(), globalOptions, args)
},
}
func init() {
cmdKey.AddCommand(cmdKeyRemove)
}
func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("key remove expects one argument as the key id")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
idPrefix := args[0]
return deleteKey(ctx, repo, idPrefix)
}
func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error {
id, err := restic.Find(ctx, repo, restic.KeyFile, idPrefix)
if err != nil {
return err
}
if id == repo.KeyID() {
return errors.Fatal("refusing to remove key currently used to access repository")
}
err = repository.RemoveKey(ctx, repo, id)
if err != nil {
return err
}
Verbosef("removed key %v\n", id)
return nil
}

View File

@@ -23,7 +23,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(cmd.Context(), cmd, globalOptions, args)
return runList(cmd.Context(), globalOptions, args)
},
}
@@ -31,9 +31,9 @@ func init() {
cmdRoot.AddCommand(cmdList)
}
func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return errors.Fatal("type not specified, usage: " + cmd.Use)
return errors.Fatal("type not specified")
}
repo, err := OpenRepository(ctx, gopts)
@@ -63,7 +63,7 @@ func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args
case "locks":
t = restic.LockFile
case "blobs":
return index.ForAllIndexes(ctx, repo.Backend(), repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, _ bool, err error) error {
if err != nil {
return err
}
@@ -76,7 +76,7 @@ func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args
return errors.Fatal("invalid type")
}
return repo.List(ctx, t, func(id restic.ID, size int64) error {
return repo.List(ctx, t, func(id restic.ID, _ int64) error {
Printf("%s\n", id)
return nil
})

View File

@@ -12,7 +12,7 @@ import (
func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
buf, err := withCaptureStdout(func() error {
return runList(context.TODO(), cmdList, opts, []string{tpe})
return runList(context.TODO(), opts, []string{tpe})
})
rtest.OK(t, err)
return parseIDsFromReader(t, buf)

View File

@@ -3,13 +3,14 @@ package main
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
@@ -52,6 +53,7 @@ type LsOptions struct {
restic.SnapshotFilter
Recursive bool
HumanReadable bool
Ncdu bool
}
var lsOptions LsOptions
@@ -64,16 +66,47 @@ func init() {
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
flags.BoolVar(&lsOptions.Ncdu, "ncdu", false, "output NCDU export format (pipe into 'ncdu -f -')")
}
type lsSnapshot struct {
*restic.Snapshot
ID *restic.ID `json:"id"`
ShortID string `json:"short_id"`
StructType string `json:"struct_type"` // "snapshot"
type lsPrinter interface {
Snapshot(sn *restic.Snapshot)
Node(path string, node *restic.Node)
LeaveDir(path string)
Close()
}
type jsonLsPrinter struct {
enc *json.Encoder
}
func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) {
type lsSnapshot struct {
*restic.Snapshot
ID *restic.ID `json:"id"`
ShortID string `json:"short_id"`
StructType string `json:"struct_type"` // "snapshot"
}
err := p.enc.Encode(lsSnapshot{
Snapshot: sn,
ID: sn.ID(),
ShortID: sn.ID().Str(),
StructType: "snapshot",
})
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
}
// Print node in our custom JSON format, followed by a newline.
func (p *jsonLsPrinter) Node(path string, node *restic.Node) {
err := lsNodeJSON(p.enc, path, node)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
}
func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error {
n := &struct {
Name string `json:"name"`
@@ -115,10 +148,117 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error {
return enc.Encode(n)
}
func (p *jsonLsPrinter) LeaveDir(_ string) {}
func (p *jsonLsPrinter) Close() {}
type ncduLsPrinter struct {
out io.Writer
depth int
}
// lsSnapshotNcdu prints a restic snapshot in Ncdu save format.
// It opens the JSON list. Nodes are added with lsNodeNcdu and the list is closed by lsCloseNcdu.
// Format documentation: https://dev.yorhel.nl/ncdu/jsonfmt
func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) {
const NcduMajorVer = 1
const NcduMinorVer = 2
snapshotBytes, err := json.Marshal(sn)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
p.depth++
fmt.Fprintf(p.out, "[%d, %d, %s", NcduMajorVer, NcduMinorVer, string(snapshotBytes))
}
func lsNcduNode(_ string, node *restic.Node) ([]byte, error) {
type NcduNode struct {
Name string `json:"name"`
Asize uint64 `json:"asize"`
Dsize uint64 `json:"dsize"`
Dev uint64 `json:"dev"`
Ino uint64 `json:"ino"`
NLink uint64 `json:"nlink"`
NotReg bool `json:"notreg"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
Mode uint16 `json:"mode"`
Mtime int64 `json:"mtime"`
}
outNode := NcduNode{
Name: node.Name,
Asize: node.Size,
Dsize: node.Size,
Dev: node.DeviceID,
Ino: node.Inode,
NLink: node.Links,
NotReg: node.Type != "dir" && node.Type != "file",
UID: node.UID,
GID: node.GID,
Mode: uint16(node.Mode & os.ModePerm),
Mtime: node.ModTime.Unix(),
}
// bits according to inode(7) manpage
if node.Mode&os.ModeSetuid != 0 {
outNode.Mode |= 0o4000
}
if node.Mode&os.ModeSetgid != 0 {
outNode.Mode |= 0o2000
}
if node.Mode&os.ModeSticky != 0 {
outNode.Mode |= 0o1000
}
return json.Marshal(outNode)
}
func (p *ncduLsPrinter) Node(path string, node *restic.Node) {
out, err := lsNcduNode(path, node)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
if node.Type == "dir" {
fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out))
p.depth++
} else {
fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out))
}
}
func (p *ncduLsPrinter) LeaveDir(_ string) {
p.depth--
fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth))
}
func (p *ncduLsPrinter) Close() {
fmt.Fprint(p.out, "\n]\n")
}
type textLsPrinter struct {
dirs []string
ListLong bool
HumanReadable bool
}
func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) {
Verbosef("%v filtered by %v:\n", sn, p.dirs)
}
func (p *textLsPrinter) Node(path string, node *restic.Node) {
Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable))
}
func (p *textLsPrinter) LeaveDir(_ string) {}
func (p *textLsPrinter) Close() {}
func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 {
return errors.Fatal("no snapshot ID specified, specify snapshot ID or use special ID 'latest'")
}
if opts.Ncdu && gopts.JSON {
return errors.Fatal("only either '--json' or '--ncdu' can be specified")
}
// extract any specific directories to walk
var dirs []string
@@ -170,7 +310,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return err
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -180,38 +320,21 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return err
}
var (
printSnapshot func(sn *restic.Snapshot)
printNode func(path string, node *restic.Node)
)
var printer lsPrinter
if gopts.JSON {
enc := json.NewEncoder(globalOptions.stdout)
printSnapshot = func(sn *restic.Snapshot) {
err := enc.Encode(lsSnapshot{
Snapshot: sn,
ID: sn.ID(),
ShortID: sn.ID().Str(),
StructType: "snapshot",
})
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
printer = &jsonLsPrinter{
enc: json.NewEncoder(globalOptions.stdout),
}
printNode = func(path string, node *restic.Node) {
err := lsNodeJSON(enc, path, node)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
} else if opts.Ncdu {
printer = &ncduLsPrinter{
out: globalOptions.stdout,
}
} else {
printSnapshot = func(sn *restic.Snapshot) {
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
}
printNode = func(path string, node *restic.Node) {
Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))
printer = &textLsPrinter{
dirs: dirs,
ListLong: opts.ListLong,
HumanReadable: opts.HumanReadable,
}
}
@@ -229,44 +352,55 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return err
}
printSnapshot(sn)
printer.Snapshot(sn)
err = walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
processNode := func(_ restic.ID, nodepath string, node *restic.Node, err error) error {
if err != nil {
return false, err
return err
}
if node == nil {
return false, nil
return nil
}
if withinDir(nodepath) {
// if we're within a dir, print the node
printNode(nodepath, node)
printer.Node(nodepath, node)
// if recursive listing is requested, signal the walker that it
// should continue walking recursively
if opts.Recursive {
return false, nil
return nil
}
}
// if there's an upcoming match deeper in the tree (but we're not
// there yet), signal the walker to descend into any subdirs
if approachingMatchingTree(nodepath) {
return false, nil
return nil
}
// otherwise, signal the walker to not walk recursively into any
// subdirs
if node.Type == "dir" {
return false, walker.ErrSkipNode
return walker.ErrSkipNode
}
return false, nil
return nil
}
err = walker.Walk(ctx, repo, *sn.Tree, walker.WalkVisitor{
ProcessNode: processNode,
LeaveDir: func(path string) {
// the root path `/` has no corresponding node and is thus also skipped by processNode
if withinDir(path) && path != "/" {
printer.LeaveDir(path)
}
},
})
if err != nil {
return err
}
printer.Close()
return nil
}
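
runLs now selects one of several lsPrinter implementations (JSON, NCDU, text) instead of juggling two closures, which makes further output formats easy to add. Below is a self-contained sketch with a hypothetical counting printer, using plain strings in place of the restic snapshot and node types:

```go
package main

import "fmt"

// lsPrinter mirrors the method set of the interface above, with restic's
// snapshot and node types replaced by strings for the sketch.
type lsPrinter interface {
	Snapshot(id string)
	Node(path string)
	LeaveDir(path string)
	Close()
}

// countingPrinter is a hypothetical extra implementation that only counts
// nodes, to show how little a new output format needs to provide.
type countingPrinter struct{ nodes int }

func (p *countingPrinter) Snapshot(id string) { fmt.Println("snapshot", id) }
func (p *countingPrinter) Node(string)        { p.nodes++ }
func (p *countingPrinter) LeaveDir(string)    {}
func (p *countingPrinter) Close()             { fmt.Println("nodes:", p.nodes) }

func main() {
	var printer lsPrinter = &countingPrinter{}
	printer.Snapshot("1a2b3c4d")
	for _, path := range []string{"/dir", "/dir/a", "/dir/b"} {
		printer.Node(path)
	}
	printer.LeaveDir("/dir")
	printer.Close()
}
```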

View File

@@ -2,18 +2,46 @@ package main
import (
"context"
"encoding/json"
"path/filepath"
"strings"
"testing"
rtest "github.com/restic/restic/internal/test"
)
func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
func testRunLsWithOpts(t testing.TB, gopts GlobalOptions, opts LsOptions, args []string) []byte {
buf, err := withCaptureStdout(func() error {
gopts.Quiet = true
opts := LsOptions{}
return runLs(context.TODO(), opts, gopts, []string{snapshotID})
return runLs(context.TODO(), opts, gopts, args)
})
rtest.OK(t, err)
return strings.Split(buf.String(), "\n")
return buf.Bytes()
}
func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
out := testRunLsWithOpts(t, gopts, LsOptions{}, []string{snapshotID})
return strings.Split(string(out), "\n")
}
func assertIsValidJSON(t *testing.T, data []byte) {
// Sanity check: output must be valid JSON.
var v interface{}
err := json.Unmarshal(data, &v)
rtest.OK(t, err)
}
func TestRunLsNcdu(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
testRunInit(t, env.gopts)
opts := BackupOptions{}
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest"})
assertIsValidJSON(t, ncdu)
ncdu = testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest", "/testdata"})
assertIsValidJSON(t, ncdu)
}

View File

@@ -11,78 +11,94 @@ import (
rtest "github.com/restic/restic/internal/test"
)
type lsTestNode struct {
path string
restic.Node
}
var lsTestNodes = []lsTestNode{
// Mode is omitted when zero.
// Permissions, by convention is "-" per mode bit
{
path: "/bar/baz",
Node: restic.Node{
Name: "baz",
Type: "file",
Size: 12345,
UID: 10000000,
GID: 20000000,
User: "nobody",
Group: "nobodies",
Links: 1,
},
},
// Even empty files get an explicit size.
{
path: "/foo/empty",
Node: restic.Node{
Name: "empty",
Type: "file",
Size: 0,
UID: 1001,
GID: 1001,
User: "not printed",
Group: "not printed",
Links: 0xF00,
},
},
// Non-regular files do not get a size.
// Mode is printed in decimal, including the type bits.
{
path: "/foo/link",
Node: restic.Node{
Name: "link",
Type: "symlink",
Mode: os.ModeSymlink | 0777,
LinkTarget: "not printed",
},
},
{
path: "/some/directory",
Node: restic.Node{
Name: "directory",
Type: "dir",
Mode: os.ModeDir | 0755,
ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
AccessTime: time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC),
ChangeTime: time.Date(2022, 3, 4, 5, 6, 7, 8, time.UTC),
},
},
// Test encoding of setuid/setgid/sticky bit
{
path: "/some/sticky",
Node: restic.Node{
Name: "sticky",
Type: "dir",
Mode: os.ModeDir | 0755 | os.ModeSetuid | os.ModeSetgid | os.ModeSticky,
},
},
}
func TestLsNodeJSON(t *testing.T) {
for _, c := range []struct {
path string
restic.Node
expect string
}{
// Mode is omitted when zero.
// Permissions, by convention is "-" per mode bit
{
path: "/bar/baz",
Node: restic.Node{
Name: "baz",
Type: "file",
Size: 12345,
UID: 10000000,
GID: 20000000,
User: "nobody",
Group: "nobodies",
Links: 1,
},
expect: `{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
},
// Even empty files get an explicit size.
{
path: "/foo/empty",
Node: restic.Node{
Name: "empty",
Type: "file",
Size: 0,
UID: 1001,
GID: 1001,
User: "not printed",
Group: "not printed",
Links: 0xF00,
},
expect: `{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
},
// Non-regular files do not get a size.
// Mode is printed in decimal, including the type bits.
{
path: "/foo/link",
Node: restic.Node{
Name: "link",
Type: "symlink",
Mode: os.ModeSymlink | 0777,
LinkTarget: "not printed",
},
expect: `{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
},
{
path: "/some/directory",
Node: restic.Node{
Name: "directory",
Type: "dir",
Mode: os.ModeDir | 0755,
ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
AccessTime: time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC),
ChangeTime: time.Date(2022, 3, 4, 5, 6, 7, 8, time.UTC),
},
expect: `{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","struct_type":"node"}`,
},
for i, expect := range []string{
`{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
`{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
`{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
`{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","struct_type":"node"}`,
`{"name":"sticky","type":"dir","path":"/some/sticky","uid":0,"gid":0,"mode":2161115629,"permissions":"dugtrwxr-xr-x","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`,
} {
c := lsTestNodes[i]
buf := new(bytes.Buffer)
enc := json.NewEncoder(buf)
err := lsNodeJSON(enc, c.path, &c.Node)
rtest.OK(t, err)
rtest.Equals(t, c.expect+"\n", buf.String())
rtest.Equals(t, expect+"\n", buf.String())
// Sanity check: output must be valid JSON.
var v interface{}
@@ -90,3 +106,54 @@ func TestLsNodeJSON(t *testing.T) {
rtest.OK(t, err)
}
}
func TestLsNcduNode(t *testing.T) {
for i, expect := range []string{
`{"name":"baz","asize":12345,"dsize":12345,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":-62135596800}`,
`{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":-62135596800}`,
`{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":-62135596800}`,
`{"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":493,"mtime":1577934245}`,
`{"name":"sticky","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":4077,"mtime":-62135596800}`,
} {
c := lsTestNodes[i]
out, err := lsNcduNode(c.path, &c.Node)
rtest.OK(t, err)
rtest.Equals(t, expect, string(out))
// Sanity check: output must be valid JSON.
var v interface{}
err = json.Unmarshal(out, &v)
rtest.OK(t, err)
}
}
func TestLsNcdu(t *testing.T) {
var buf bytes.Buffer
printer := &ncduLsPrinter{
out: &buf,
}
printer.Snapshot(&restic.Snapshot{
Hostname: "host",
Paths: []string{"/example"},
})
printer.Node("/directory", &restic.Node{
Type: "dir",
Name: "directory",
})
printer.Node("/directory/data", &restic.Node{
Type: "file",
Name: "data",
Size: 42,
})
printer.LeaveDir("/directory")
printer.Close()
rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"},
[
{"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800},
{"name":"data","asize":42,"dsize":42,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}
]
]
`, buf.String())
}
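
The expected "mode" values in the NCDU test cases above (511, 493, 4077, ...) are decimal renderings of classic Unix permission octals: lsNcduNode masks the Go os.FileMode permission bits and re-adds setuid/setgid/sticky as 0o4000/0o2000/0o1000, per inode(7). A small sketch of that conversion; the helper name is illustrative:

```go
package main

import (
	"fmt"
	"os"
)

// ncduMode converts a Go os.FileMode into the classic Unix permission bits
// that the ncdu format expects, mirroring the bit handling in lsNcduNode.
func ncduMode(m os.FileMode) uint16 {
	mode := uint16(m & os.ModePerm)
	if m&os.ModeSetuid != 0 {
		mode |= 0o4000
	}
	if m&os.ModeSetgid != 0 {
		mode |= 0o2000
	}
	if m&os.ModeSticky != 0 {
		mode |= 0o1000
	}
	return mode
}

func main() {
	sticky := os.ModeDir | 0o755 | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
	fmt.Println(ncduMode(sticky))                 // 4077, matching the "sticky" test case above
	fmt.Println(ncduMode(os.ModeSymlink | 0o777)) // 511, matching the "link" case
}
```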

View File

@@ -113,6 +113,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
return errors.Fatal("wrong number of parameters")
}
mountpoint := args[0]
// Check the existence of the mount point at the earliest stage to
// prevent unnecessary computations while opening the repository.
if _, err := resticfs.Stat(mountpoint); errors.Is(err, os.ErrNotExist) {
Verbosef("Mountpoint %s doesn't exist\n", mountpoint)
return err
}
debug.Log("start mount")
defer debug.Log("finish mount")
@@ -136,12 +145,6 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
return err
}
mountpoint := args[0]
if _, err := resticfs.Stat(mountpoint); errors.Is(err, os.ErrNotExist) {
Verbosef("Mountpoint %s doesn't exist\n", mountpoint)
return err
}
mountOptions := []systemFuse.MountOption{
systemFuse.ReadOnly(),
systemFuse.FSName("restic"),

View File

@@ -21,7 +21,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
Hidden: true,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
Run: func(_ *cobra.Command, _ []string) {
fmt.Printf("All Extended Options:\n")
var maxLen int
for _, opt := range options.List() {

View File

@@ -15,6 +15,7 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
"github.com/spf13/cobra"
)
@@ -36,7 +37,7 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
return runPrune(cmd.Context(), pruneOptions, globalOptions)
},
}
@@ -66,10 +67,10 @@ func init() {
f := cmdPrune.Flags()
f.BoolVarP(&pruneOptions.DryRun, "dry-run", "n", false, "do not modify the repository, just print what would be done")
f.StringVarP(&pruneOptions.UnsafeNoSpaceRecovery, "unsafe-recover-no-free-space", "", "", "UNSAFE, READ THE DOCUMENTATION BEFORE USING! Try to recover a repository stuck with no free space. Do not use without trying out 'prune --max-repack-size 0' first.")
addPruneOptions(cmdPrune)
addPruneOptions(cmdPrune, &pruneOptions)
}
func addPruneOptions(c *cobra.Command) {
func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) {
f := c.Flags()
f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')")
f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)")
@@ -100,7 +101,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
// parse MaxUnused either as unlimited, a percentage, or an absolute number of bytes
switch {
case maxUnused == "unlimited":
opts.maxUnusedBytes = func(used uint64) uint64 {
opts.maxUnusedBytes = func(_ uint64) uint64 {
return math.MaxUint64
}
@@ -129,7 +130,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
}
opts.maxUnusedBytes = func(used uint64) uint64 {
opts.maxUnusedBytes = func(_ uint64) uint64 {
return uint64(size)
}
}
@@ -152,7 +153,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
return err
}
if repo.Backend().Connections() < 2 {
if repo.Connections() < 2 {
return errors.Fatal("prune requires a backend connection limit of at least two")
}
@@ -406,7 +407,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
})
// if duplicate blobs exist, those will be set to either "used" or "unused":
// - mark only one occurence of duplicate blobs as used
// - mark only one occurrence of duplicate blobs as used
// - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used"
// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
if hasDuplicates {
@@ -415,7 +416,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
bh := blob.BlobHandle
count, ok := usedBlobs[bh]
// skip non-duplicate, aka. normal blobs
// count == 0 is used to mark that this was a duplicate blob with only a single occurence remaining
// count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining
if !ok || count == 1 {
return
}
@@ -424,7 +425,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
size := uint64(blob.Length)
switch {
case ip.usedBlobs > 0, count == 0:
// other used blobs in pack or "last" occurence -> transition to used
// other used blobs in pack or "last" occurrence -> transition to used
ip.usedSize += size
ip.usedBlobs++
ip.unusedSize -= size
@@ -434,7 +435,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
stats.blobs.used++
stats.size.duplicate -= size
stats.blobs.duplicate--
// let other occurences remain marked as unused
// let other occurrences remain marked as unused
usedBlobs[bh] = 1
default:
// remain unused and decrease counter
@@ -766,7 +767,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
return errors.Fatalf("%s", err)
}
} else if len(plan.ignorePacks) != 0 {
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil)
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false)
if err != nil {
return errors.Fatalf("%s", err)
}
@@ -778,7 +779,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
}
if opts.unsafeRecovery {
_, err = writeIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil)
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true)
if err != nil {
return errors.Fatalf("%s", err)
}
@@ -788,29 +789,28 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
return nil
}
func writeIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs) (restic.IDSet, error) {
func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error {
Verbosef("rebuilding index\n")
bar := newProgressMax(!gopts.Quiet, 0, "packs processed")
obsoleteIndexes, err := repo.Index().Save(ctx, repo, removePacks, extraObsolete, bar)
bar.Done()
return obsoleteIndexes, err
}
func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs) error {
obsoleteIndexes, err := writeIndexFiles(ctx, gopts, repo, removePacks, extraObsolete)
if err != nil {
return err
}
Verbosef("deleting obsolete index files\n")
return DeleteFilesChecked(ctx, gopts, repo, obsoleteIndexes, restic.IndexFile)
return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{
SaveProgress: bar,
DeleteProgress: func() *progress.Counter {
return newProgressMax(!gopts.Quiet, 0, "old indexes deleted")
},
DeleteReport: func(id restic.ID, _ error) {
if gopts.verbosity > 2 {
Verbosef("removed index %v\n", id.String())
}
},
SkipDeletion: skipDeletion,
})
}
func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) {
var snapshotTrees restic.IDs
Verbosef("loading all snapshots...\n")
err = restic.ForAllSnapshots(ctx, repo.Backend(), repo, ignoreSnapshots,
err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots,
func(id restic.ID, sn *restic.Snapshot, err error) error {
if err != nil {
debug.Log("failed to load snapshot %v (error %v)", id, err)
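
For orientation, a condensed sketch (not part of the diff; it only restates the call sites shown above, and the behaviour of the skipDeletion flag is inferred from its name and from MasterIndexSaveOpts) of how the reworked index rebuild is driven from cmd_prune.go:

// illustrative sketch, assuming the signatures shown in the diff above
if len(plan.ignorePacks) != 0 {
	// normal path: write new index files, then delete the obsolete ones
	if err := rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false); err != nil {
		return errors.Fatalf("%s", err)
	}
}
if opts.unsafeRecovery {
	// recovery path: write new index files but skip deleting the old ones
	if err := rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true); err != nil {
		return errors.Fatalf("%s", err)
	}
}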


@@ -6,13 +6,13 @@ import (
"path/filepath"
"testing"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/backend"
rtest "github.com/restic/restic/internal/test"
)
func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
oldHook := gopts.backendTestHook
gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return newListOnceBackend(r), nil }
defer func() {
gopts.backendTestHook = oldHook
}()
@@ -81,7 +81,10 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
DryRun: true,
Last: 1,
}
return runForget(context.TODO(), opts, gopts, args)
pruneOpts := PruneOptions{
MaxUnused: "5%",
}
return runForget(context.TODO(), opts, pruneOpts, gopts, args)
})
rtest.OK(t, err)
@@ -130,7 +133,7 @@ func TestPruneWithDamagedRepository(t *testing.T) {
removePacksExcept(env.gopts, t, oldPacks, false)
oldHook := env.gopts.backendTestHook
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return newListOnceBackend(r), nil }
defer func() {
env.gopts.backendTestHook = oldHook
}()


@@ -5,7 +5,6 @@ import (
"os"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
@@ -26,7 +25,7 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
return runRecover(cmd.Context(), globalOptions)
},
}
@@ -52,7 +51,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
return err
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -92,7 +91,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
bar.Done()
Verbosef("load snapshots\n")
err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(id restic.ID, sn *restic.Snapshot, err error) error {
err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(_ restic.ID, sn *restic.Snapshot, _ error) error {
trees[*sn.Tree] = true
return nil
})
@@ -159,7 +158,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
}
func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.Repository, tree *restic.ID) error {
func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.SaverUnpacked, tree *restic.ID) error {
sn, err := restic.NewSnapshot([]string{name}, tags, hostname, time.Now())
if err != nil {
return errors.Fatalf("unable to save snapshot: %v", err)


@@ -24,7 +24,7 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
},
}
@@ -78,7 +78,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti
if opts.ReadAllPacks {
// get list of old index files but start with empty index
err := repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error {
obsoleteIndexes = append(obsoleteIndexes, id)
return nil
})
@@ -88,7 +88,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti
} else {
Verbosef("loading indexes...\n")
mi := index.NewMasterIndex()
err := index.ForAllIndexes(ctx, repo.Backend(), repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error {
if err != nil {
Warnf("removing invalid index %v: %v\n", id, err)
obsoleteIndexes = append(obsoleteIndexes, id)
@@ -154,7 +154,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti
}
}
err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes)
err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false)
if err != nil {
return err
}


@@ -8,6 +8,7 @@ import (
"sync"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/index"
"github.com/restic/restic/internal/restic"
@@ -70,12 +71,12 @@ func TestRebuildIndexAlwaysFull(t *testing.T) {
// indexErrorBackend modifies the first index after reading.
type indexErrorBackend struct {
restic.Backend
backend.Backend
lock sync.Mutex
hasErred bool
}
func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
func (b *indexErrorBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
// protect hasErred
b.lock.Lock()
@@ -101,7 +102,7 @@ func (erd errorReadCloser) Read(p []byte) (int, error) {
}
func TestRebuildIndexDamage(t *testing.T) {
testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
testRebuildIndex(t, func(r backend.Backend) (backend.Backend, error) {
return &indexErrorBackend{
Backend: r,
}, nil
@@ -109,11 +110,11 @@ func TestRebuildIndexDamage(t *testing.T) {
}
type appendOnlyBackend struct {
restic.Backend
backend.Backend
}
// called via repo.Backend().Remove()
func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error {
func (b *appendOnlyBackend) Remove(_ context.Context, h backend.Handle) error {
return errors.Errorf("Failed to remove %v", h)
}
@@ -127,7 +128,7 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
err := withRestoreGlobalOptions(func() error {
globalOptions.stdout = io.Discard
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
return &appendOnlyBackend{r}, nil
}
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)


@@ -5,11 +5,12 @@ import (
"io"
"os"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/termstatus"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
var cmdRepairPacks = &cobra.Command{
@@ -28,7 +29,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runRepairPacks(cmd.Context(), globalOptions, args)
term, cancel := setupTermstatus()
defer cancel()
return runRepairPacks(cmd.Context(), globalOptions, term, args)
},
}
@@ -36,14 +39,7 @@ func init() {
cmdRepair.AddCommand(cmdRepairPacks)
}
func runRepairPacks(ctx context.Context, gopts GlobalOptions, args []string) error {
// FIXME discuss and add proper feature flag mechanism
flag, _ := os.LookupEnv("RESTIC_FEATURES")
if flag != "repair-packs-v1" {
return errors.Fatal("This command is experimental and may change/be removed without notice between restic versions. " +
"Set the environment variable 'RESTIC_FEATURES=repair-packs-v1' to enable it.")
}
func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
ids := restic.NewIDSet()
for _, arg := range args {
id, err := restic.ParseID(arg)
@@ -67,24 +63,22 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, args []string) err
return err
}
return repairPacks(ctx, gopts, repo, ids)
}
func repairPacks(ctx context.Context, gopts GlobalOptions, repo *repository.Repository, ids restic.IDSet) error {
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err := repo.LoadIndex(ctx, bar)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return errors.Fatalf("%s", err)
}
Warnf("saving backup copies of pack files in current folder\n")
printer := newTerminalProgressPrinter(gopts.verbosity, term)
printer.P("saving backup copies of pack files to current folder")
for id := range ids {
f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
if err != nil {
return errors.Fatalf("%s", err)
return err
}
err = repo.Backend().Load(ctx, restic.Handle{Type: restic.PackFile, Name: id.String()}, 0, 0, func(rd io.Reader) error {
err = repo.Backend().Load(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}, 0, 0, func(rd io.Reader) error {
_, err := f.Seek(0, 0)
if err != nil {
return err
@@ -93,66 +87,15 @@ func repairPacks(ctx context.Context, gopts GlobalOptions, repo *repository.Repo
return err
})
if err != nil {
return errors.Fatalf("%s", err)
return err
}
}
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
repo.DisableAutoIndexUpdate()
Warnf("salvaging intact data from specified pack files\n")
bar = newProgressMax(!gopts.Quiet, uint64(len(ids)), "pack files")
defer bar.Done()
wg.Go(func() error {
// examine all data the indexes have for the pack file
for b := range repo.Index().ListPacks(wgCtx, ids) {
blobs := b.Blobs
if len(blobs) == 0 {
Warnf("no blobs found for pack %v\n", b.PackID)
bar.Add(1)
continue
}
err = repository.StreamPack(wgCtx, repo.Backend().Load, repo.Key(), b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
// Fallback path
buf, err = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil)
if err != nil {
Warnf("failed to load blob %v: %v\n", blob.ID, err)
return nil
}
}
id, _, _, err := repo.SaveBlob(wgCtx, blob.Type, buf, restic.ID{}, true)
if !id.Equal(blob.ID) {
panic("pack id mismatch during upload")
}
return err
})
if err != nil {
return err
}
bar.Add(1)
}
return repo.Flush(wgCtx)
})
if err := wg.Wait(); err != nil {
return errors.Fatalf("%s", err)
}
bar.Done()
// remove salvaged packs from index
err = rebuildIndexFiles(ctx, gopts, repo, ids, nil)
err = repository.RepairPacks(ctx, repo, ids, printer)
if err != nil {
return errors.Fatalf("%s", err)
}
// cleanup
Warnf("removing salvaged pack files\n")
DeleteFiles(ctx, gopts, repo, ids, restic.PackFile)
Warnf("\nUse `restic repair snapshots --forget` to remove the corrupted data blobs from all snapshots\n")
return nil
}
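
For context, a minimal sketch of the streamlined command flow after the salvage logic moved into the repository package (only names visible in the diff above are used; elided steps are summarized in comments):

// illustrative sketch of runRepairPacks after the refactor
term, cancel := setupTermstatus()
defer cancel()
// ... parse pack IDs into ids, open and lock the repository, load the index ...
printer := newTerminalProgressPrinter(gopts.verbosity, term)
printer.P("saving backup copies of pack files to current folder")
// a local safety copy of every selected pack is written as "pack-<ID>"
// before repository.RepairPacks takes over the salvaging
if err := repository.RepairPacks(ctx, repo, ids, printer); err != nil {
	return errors.Fatalf("%s", err)
}
return nil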


@@ -3,7 +3,6 @@ package main
import (
"context"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/walker"
@@ -84,7 +83,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
repo.SetDryRun()
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -126,7 +125,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
node.Size = newSize
return node
},
RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) {
RewriteFailedTree: func(_ restic.ID, path string, _ error) (restic.ID, error) {
if path == "/" {
Verbosef(" dir %q: not readable\n", path)
// remove snapshots with invalid root node
@@ -145,11 +144,11 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
changedCount := 0
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
changed, err := filterAndReplaceSnapshot(ctx, repo, sn,
func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
}, opts.DryRun, opts.Forget, "repaired")
}, opts.DryRun, opts.Forget, nil, "repaired")
if err != nil {
return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)
}


@@ -3,7 +3,6 @@ package main
import (
"context"
"strings"
"sync"
"time"
"github.com/restic/restic/internal/debug"
@@ -38,31 +37,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
var wg sync.WaitGroup
cancelCtx, cancel := context.WithCancel(ctx)
defer func() {
// shutdown termstatus
cancel()
wg.Wait()
}()
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// allow usage of warnf / verbosef
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
defer func() {
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
}()
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return runRestore(ctx, restoreOptions, globalOptions, term, args)
term, cancel := setupTermstatus()
defer cancel()
return runRestore(cmd.Context(), restoreOptions, globalOptions, term, args)
},
}
@@ -168,7 +145,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
}).FindLatest(ctx, repo.Backend(), repo, snapshotIDString)
}).FindLatest(ctx, repo, repo, snapshotIDString)
if err != nil {
return errors.Fatalf("failed to find snapshot: %v", err)
}
@@ -204,7 +181,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
excludePatterns := filter.ParsePatterns(opts.Exclude)
insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude)
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, err := filter.List(excludePatterns, item)
if err != nil {
msg.E("error for exclude pattern: %v", err)
@@ -227,7 +204,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
includePatterns := filter.ParsePatterns(opts.Include)
insensitiveIncludePatterns := filter.ParsePatterns(opts.InsensitiveInclude)
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
if err != nil {
msg.E("error for include pattern: %v", err)


@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"time"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
@@ -46,11 +47,42 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
},
}
type snapshotMetadata struct {
Hostname string
Time *time.Time
}
type snapshotMetadataArgs struct {
Hostname string
Time string
}
func (sma snapshotMetadataArgs) empty() bool {
return sma.Hostname == "" && sma.Time == ""
}
func (sma snapshotMetadataArgs) convert() (*snapshotMetadata, error) {
if sma.empty() {
return nil, nil
}
var timeStamp *time.Time
if sma.Time != "" {
t, err := time.ParseInLocation(TimeFormat, sma.Time, time.Local)
if err != nil {
return nil, errors.Fatalf("error in time option: %v\n", err)
}
timeStamp = &t
}
return &snapshotMetadata{Hostname: sma.Hostname, Time: timeStamp}, nil
}
// RewriteOptions collects all options for the rewrite command.
type RewriteOptions struct {
Forget bool
DryRun bool
Metadata snapshotMetadataArgs
restic.SnapshotFilter
excludePatternOptions
}
@@ -63,11 +95,15 @@ func init() {
f := cmdRewrite.Flags()
f.BoolVarP(&rewriteOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
f.BoolVarP(&rewriteOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
f.StringVar(&rewriteOptions.Metadata.Hostname, "new-host", "", "replace hostname")
f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup")
initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true)
initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions)
}
type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error)
func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, opts RewriteOptions) (bool, error) {
if sn.Tree == nil {
return false, errors.Errorf("snapshot %v has nil tree", sn.ID().Str())
@@ -78,33 +114,50 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
return false, err
}
selectByName := func(nodepath string) bool {
for _, reject := range rejectByNameFuncs {
if reject(nodepath) {
return false
}
}
return true
metadata, err := opts.Metadata.convert()
if err != nil {
return false, err
}
rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
RewriteNode: func(node *restic.Node, path string) *restic.Node {
if selectByName(path) {
return node
var filter rewriteFilterFunc
if len(rejectByNameFuncs) > 0 {
selectByName := func(nodepath string) bool {
for _, reject := range rejectByNameFuncs {
if reject(nodepath) {
return false
}
}
Verbosef(fmt.Sprintf("excluding %s\n", path))
return nil
},
DisableNodeCache: true,
})
return true
}
rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
RewriteNode: func(node *restic.Node, path string) *restic.Node {
if selectByName(path) {
return node
}
Verbosef(fmt.Sprintf("excluding %s\n", path))
return nil
},
DisableNodeCache: true,
})
filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
}
} else {
filter = func(_ context.Context, sn *restic.Snapshot) (restic.ID, error) {
return *sn.Tree, nil
}
}
return filterAndReplaceSnapshot(ctx, repo, sn,
func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
}, opts.DryRun, opts.Forget, "rewrite")
filter, opts.DryRun, opts.Forget, metadata, "rewrite")
}
func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, filter func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error), dryRun bool, forget bool, addTag string) (bool, error) {
func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *restic.Snapshot,
filter rewriteFilterFunc, dryRun bool, forget bool, newMetadata *snapshotMetadata, addTag string) (bool, error) {
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
@@ -128,7 +181,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
if dryRun {
Verbosef("would delete empty snapshot\n")
} else {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(ctx, h); err != nil {
return false, err
}
@@ -138,7 +191,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
return true, nil
}
if filteredTree == *sn.Tree {
if filteredTree == *sn.Tree && newMetadata == nil {
debug.Log("Snapshot %v not modified", sn)
return false, nil
}
@@ -151,6 +204,14 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
Verbosef("would remove old snapshot\n")
}
if newMetadata != nil && newMetadata.Time != nil {
Verbosef("would set time to %s\n", newMetadata.Time)
}
if newMetadata != nil && newMetadata.Hostname != "" {
Verbosef("would set hostname to %s\n", newMetadata.Hostname)
}
return true, nil
}
@@ -162,6 +223,16 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
sn.AddTags([]string{addTag})
}
if newMetadata != nil && newMetadata.Time != nil {
Verbosef("setting time to %s\n", *newMetadata.Time)
sn.Time = *newMetadata.Time
}
if newMetadata != nil && newMetadata.Hostname != "" {
Verbosef("setting host to %s\n", newMetadata.Hostname)
sn.Hostname = newMetadata.Hostname
}
// Save the new snapshot.
id, err := restic.SaveSnapshot(ctx, repo, sn)
if err != nil {
@@ -170,7 +241,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
Verbosef("saved new snapshot %v\n", id.Str())
if forget {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(ctx, h); err != nil {
return false, err
}
@@ -181,8 +252,8 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
}
func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, args []string) error {
if opts.excludePatternOptions.Empty() {
return errors.Fatal("Nothing to do: no excludes provided")
if opts.excludePatternOptions.Empty() && opts.Metadata.empty() {
return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided")
}
repo, err := OpenRepository(ctx, gopts)
@@ -207,7 +278,7 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
repo.SetDryRun()
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -219,7 +290,7 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
changedCount := 0
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
Verbosef("\n%v\n", sn)
changed, err := rewriteSnapshot(ctx, repo, sn, opts)
if err != nil {
return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)


@@ -9,12 +9,13 @@ import (
rtest "github.com/restic/restic/internal/test"
)
func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool) {
func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool, metadata snapshotMetadataArgs) {
opts := RewriteOptions{
excludePatternOptions: excludePatternOptions{
Excludes: excludes,
},
Forget: forget,
Forget: forget,
Metadata: metadata,
}
rtest.OK(t, runRewrite(context.TODO(), opts, gopts, nil))
@@ -38,7 +39,7 @@ func TestRewrite(t *testing.T) {
createBasicRewriteRepo(t, env)
// exclude some data
testRunRewriteExclude(t, env.gopts, []string{"3"}, false)
testRunRewriteExclude(t, env.gopts, []string{"3"}, false, snapshotMetadataArgs{Hostname: "", Time: ""})
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs)
testRunCheck(t, env.gopts)
@@ -50,7 +51,7 @@ func TestRewriteUnchanged(t *testing.T) {
snapshotID := createBasicRewriteRepo(t, env)
// use an exclude that will not exclude anything
testRunRewriteExclude(t, env.gopts, []string{"3dflkhjgdflhkjetrlkhjgfdlhkj"}, false)
testRunRewriteExclude(t, env.gopts, []string{"3dflkhjgdflhkjetrlkhjgfdlhkj"}, false, snapshotMetadataArgs{Hostname: "", Time: ""})
newSnapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(newSnapshotIDs) == 1, "expected one snapshot, got %v", newSnapshotIDs)
rtest.Assert(t, snapshotID == newSnapshotIDs[0], "snapshot id changed unexpectedly")
@@ -63,11 +64,44 @@ func TestRewriteReplace(t *testing.T) {
snapshotID := createBasicRewriteRepo(t, env)
// exclude some data
testRunRewriteExclude(t, env.gopts, []string{"3"}, true)
newSnapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(newSnapshotIDs) == 1, "expected one snapshot, got %v", newSnapshotIDs)
testRunRewriteExclude(t, env.gopts, []string{"3"}, true, snapshotMetadataArgs{Hostname: "", Time: ""})
newSnapshotIDs := testListSnapshots(t, env.gopts, 1)
rtest.Assert(t, snapshotID != newSnapshotIDs[0], "snapshot id should have changed")
// check forbids unused blobs, thus remove them first
testRunPrune(t, env.gopts, PruneOptions{MaxUnused: "0"})
testRunCheck(t, env.gopts)
}
func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
createBasicRewriteRepo(t, env)
testRunRewriteExclude(t, env.gopts, []string{}, true, metadata)
repo, _ := OpenRepository(context.TODO(), env.gopts)
snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil)
rtest.OK(t, err)
rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots))
newSnapshot := snapshots[0]
if metadata.Time != "" {
rtest.Assert(t, newSnapshot.Time.Format(TimeFormat) == metadata.Time, "New snapshot should have time %s", metadata.Time)
}
if metadata.Hostname != "" {
rtest.Assert(t, newSnapshot.Hostname == metadata.Hostname, "New snapshot should have host %s", metadata.Hostname)
}
}
func TestRewriteMetadata(t *testing.T) {
newHost := "new host"
newTime := "1999-01-01 11:11:11"
for _, metadata := range []snapshotMetadataArgs{
{Hostname: "", Time: newTime},
{Hostname: newHost, Time: ""},
{Hostname: newHost, Time: newTime},
} {
testRewriteMetadata(t, metadata)
}
}
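
To make the new metadata rewriting concrete, a small illustrative sketch (the identifiers follow the diff above; the concrete values are hypothetical):

args := snapshotMetadataArgs{Hostname: "new host", Time: "1999-01-01 11:11:11"}
// convert parses Time with TimeFormat ("2006-01-02 15:04:05" from global.go);
// an unparsable value is rejected with "error in time option: ..."
meta, err := args.convert()
// on success, meta.Hostname and meta.Time (a *time.Time) are applied in
// filterAndReplaceSnapshot before the rewritten snapshot is saved

On the command line this corresponds to something like restic rewrite --new-host "new host" --new-time "1999-01-01 11:11:11", optionally combined with --forget and the existing exclude options.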


@@ -73,7 +73,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
}
var snapshots restic.Snapshots
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
@@ -290,7 +290,7 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
return nil
}
// Snapshot helps to print Snaphots as JSON with their ID included.
// Snapshot helps to print Snapshots as JSON with their ID included.
type Snapshot struct {
*restic.Snapshot


@@ -8,10 +8,10 @@ import (
"strings"
"github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/restorer"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/table"
"github.com/restic/restic/internal/walker"
@@ -94,7 +94,7 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
}
}
snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
@@ -189,7 +189,7 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return nil
}
func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Loader, opts StatsOptions, stats *statsContainer) error {
if snapshot.Tree == nil {
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
}
@@ -202,8 +202,10 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
}
uniqueInodes := make(map[uint64]struct{})
err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, uniqueInodes))
hardLinkIndex := restorer.NewHardlinkIndex[struct{}]()
err := walker.Walk(ctx, repo, *snapshot.Tree, walker.WalkVisitor{
ProcessNode: statsWalkTree(repo, opts, stats, hardLinkIndex),
})
if err != nil {
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
}
@@ -211,13 +213,13 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
return nil
}
func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, hardLinkIndex *restorer.HardlinkIndex[struct{}]) walker.WalkFunc {
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) error {
if nodeErr != nil {
return true, nodeErr
return nodeErr
}
if node == nil {
return true, nil
return nil
}
if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
@@ -247,7 +249,7 @@ func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContai
// is always a data blob since we're accessing it via a file's Content array
blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob)
if !found {
return true, fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID)
return fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID)
}
// count the blob's size, then add this blob by this
@@ -270,15 +272,13 @@ func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContai
// if inodes are present, only count each inode once
// (hard links do not increase restore size)
if _, ok := uniqueInodes[node.Inode]; !ok || node.Inode == 0 {
uniqueInodes[node.Inode] = struct{}{}
if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 {
hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{})
stats.TotalSize += node.Size
}
return false, nil
}
return true, nil
return nil
}
}
@@ -365,9 +365,9 @@ func statsDebug(ctx context.Context, repo restic.Repository) error {
return nil
}
func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.FileType) (*sizeHistogram, error) {
hist := newSizeHistogram(2 * repository.MaxPackSize)
err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
err := repo.List(ctx, tpe, func(_ restic.ID, size int64) error {
hist.Add(uint64(size))
return nil
})


@@ -5,6 +5,7 @@ import (
"github.com/spf13/cobra"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
@@ -85,7 +86,7 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna
debug.Log("new snapshot saved as %v", id)
// Remove the old snapshot.
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(ctx, h); err != nil {
return false, err
}
@@ -119,7 +120,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
}
changeCnt := 0
for sn := range FindFilteredSnapshots(ctx, repo.Backend(), repo, &opts.SnapshotFilter, args) {
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten())
if err != nil {
Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)


@@ -19,7 +19,7 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
return runUnlock(cmd.Context(), unlockOptions, globalOptions)
},
}


@@ -1,6 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"runtime"
@@ -20,9 +21,32 @@ EXIT STATUS
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("restic %s compiled with %v on %v/%v\n",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
Run: func(_ *cobra.Command, _ []string) {
if globalOptions.JSON {
type jsonVersion struct {
Version string `json:"version"`
GoVersion string `json:"go_version"`
GoOS string `json:"go_os"`
GoArch string `json:"go_arch"`
}
jsonS := jsonVersion{
Version: version,
GoVersion: runtime.Version(),
GoOS: runtime.GOOS,
GoArch: runtime.GOARCH,
}
err := json.NewEncoder(globalOptions.stdout).Encode(jsonS)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
return
}
} else {
fmt.Printf("restic %s compiled with %v on %v/%v\n",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
}
},
}
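
For illustration (the field names come from the jsonVersion struct tags above; the values are hypothetical), running restic version with the global --json flag is expected to print a single JSON object along the lines of:

{"version":"0.16.4-dev (compiled manually)","go_version":"go1.22.0","go_os":"linux","go_arch":"amd64"}

while the default output keeps the existing "restic <version> compiled with <go> on <os>/<arch>" line.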

cmd/restic/cmd_webdav.go (new file, 126 lines)

@@ -0,0 +1,126 @@
package main
import (
"context"
"log"
"net/http"
"os"
"time"
"github.com/spf13/cobra"
"golang.org/x/net/webdav"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/server/rofs"
)
var cmdWebDAV = &cobra.Command{
Use: "webdav [flags]",
Short: "runs a WebDAV server for the repository",
Long: `
The webdav command runs a WebDAV server for the repository, which you can then access via a WebDAV client.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runWebDAV(cmd.Context(), webdavOptions, globalOptions, args)
},
}
// WebDAVOptions collects all options for the webdav command.
type WebDAVOptions struct {
Listen string
restic.SnapshotFilter
TimeTemplate string
PathTemplates []string
}
var webdavOptions WebDAVOptions
func init() {
cmdRoot.AddCommand(cmdWebDAV)
fs := cmdWebDAV.Flags()
fs.StringVarP(&webdavOptions.Listen, "listen", "l", "localhost:3080", "set the listen host name and `address`")
initMultiSnapshotFilter(fs, &webdavOptions.SnapshotFilter, true)
fs.StringArrayVar(&webdavOptions.PathTemplates, "path-template", nil, "set `template` for path names (can be specified multiple times)")
fs.StringVar(&webdavOptions.TimeTemplate, "time-template", time.RFC3339, "set `template` to use for times")
}
func runWebDAV(ctx context.Context, opts WebDAVOptions, gopts GlobalOptions, args []string) error {
if len(args) > 0 {
return errors.Fatal("this command does not accept additional arguments")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
errorLogger := log.New(os.Stderr, "error log: ", log.Flags())
cfg := rofs.Config{
Filter: opts.SnapshotFilter,
TimeTemplate: opts.TimeTemplate,
PathTemplates: opts.PathTemplates,
}
root, err := rofs.New(ctx, repo, cfg)
if err != nil {
return err
}
// root := os.DirFS(".")
// h, err := webdav.NewWebDAV(ctx, root)
// if err != nil {
// return err
// }
// root := fstest.MapFS{
// "foobar": &fstest.MapFile{
// Data: []byte("foobar test content"),
// Mode: 0644,
// ModTime: time.Now(),
// },
// "test.txt": &fstest.MapFile{
// Data: []byte("other file"),
// Mode: 0640,
// ModTime: time.Now(),
// },
// }
logRequest := func(req *http.Request, err error) {
errorLogger.Printf("req %v %v -> %v\n", req.Method, req.URL.Path, err)
}
srv := &http.Server{
ReadTimeout: 60 * time.Second,
WriteTimeout: 60 * time.Second,
Addr: opts.Listen,
// Handler: http.FileServer(http.FS(root)),
Handler: &webdav.Handler{
FileSystem: rofs.WebDAVFS(root),
LockSystem: webdav.NewMemLS(),
Logger: logRequest,
},
ErrorLog: errorLogger,
}
return srv.ListenAndServe()
}
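
As a usage illustration (the repository path is a placeholder; the flag names and the localhost:3080 default come from the flag definitions above), the new command would be started along the lines of:

restic -r /path/to/repo webdav --listen localhost:3080

and can be narrowed with the usual snapshot filter flags or customized via --path-template and --time-template.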


@@ -3,8 +3,6 @@ package main
import (
"context"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/restic"
)
@@ -23,46 +21,21 @@ func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Re
// deleteFiles deletes the given fileList of fileType in parallel
// if ignoreError=true, it will print a warning if there was an error, else it will abort.
func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
totalCount := len(fileList)
fileChan := make(chan restic.ID)
wg, ctx := errgroup.WithContext(ctx)
wg.Go(func() error {
defer close(fileChan)
for id := range fileList {
select {
case fileChan <- id:
case <-ctx.Done():
return ctx.Err()
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted")
defer bar.Done()
return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
if err != nil {
if !gopts.JSON {
Warnf("unable to remove %v/%v from the repository\n", fileType, id)
}
if !ignoreError {
return err
}
}
if !gopts.JSON && gopts.verbosity > 2 {
Verbosef("removed %v/%v\n", fileType, id)
}
return nil
})
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, uint64(totalCount), "files deleted")
defer bar.Done()
// deleting files is IO-bound
workerCount := repo.Connections()
for i := 0; i < int(workerCount); i++ {
wg.Go(func() error {
for id := range fileChan {
h := restic.Handle{Type: fileType, Name: id.String()}
err := repo.Backend().Remove(ctx, h)
if err != nil {
if !gopts.JSON {
Warnf("unable to remove %v from the repository\n", h)
}
if !ignoreError {
return err
}
}
if !gopts.JSON && gopts.verbosity > 2 {
Verbosef("removed %v\n", h)
}
bar.Add(1)
}
return nil
})
}
err := wg.Wait()
return err
}, bar)
}


@@ -426,7 +426,7 @@ func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) {
return scanner.Err()
}()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to read excludes from file %q: %w", filename, err)
}
}
return excludes, nil


@@ -3,7 +3,6 @@ package main
import (
"context"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
"github.com/spf13/pflag"
)
@@ -33,7 +32,7 @@ func FindFilteredSnapshots(ctx context.Context, be restic.Lister, loader restic.
out := make(chan *restic.Snapshot)
go func() {
defer close(out)
be, err := backend.MemorizeList(ctx, be, restic.SnapshotFile)
be, err := restic.MemorizeList(ctx, be, restic.SnapshotFile)
if err != nil {
Warnf("could not load snapshots: %v\n", err)
return


@@ -43,12 +43,12 @@ import (
"golang.org/x/term"
)
var version = "0.16.1"
var version = "0.16.4-dev (compiled manually)"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
type backendWrapper func(r restic.Backend) (restic.Backend, error)
type backendWrapper func(r backend.Backend) (backend.Backend, error)
// GlobalOptions hold all global options for restic.
type GlobalOptions struct {
@@ -67,6 +67,7 @@ type GlobalOptions struct {
CleanupCache bool
Compression repository.CompressionMode
PackSize uint
NoExtraVerify bool
backend.TransportOptions
limiter.Limits
@@ -127,7 +128,7 @@ func init() {
f.StringVarP(&globalOptions.KeyHint, "key-hint", "", "", "`key` ID of key to try decrypting first (default: $RESTIC_KEY_HINT)")
f.StringVarP(&globalOptions.PasswordCommand, "password-command", "", "", "shell `command` to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND)")
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
// use empty paremeter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
// use empty parameter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2)")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repository, this allows some operations on read-only repositories")
f.DurationVar(&globalOptions.RetryLock, "retry-lock", 0, "retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)")
@@ -139,6 +140,7 @@ func init() {
f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)")
f.BoolVar(&globalOptions.NoExtraVerify, "no-extra-verify", false, "skip additional verification of data before upload (see documentation)")
f.IntVar(&globalOptions.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum `rate` in KiB/s. (default: unlimited)")
f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. (default: unlimited)")
f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)")
@@ -453,8 +455,9 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}
s, err := repository.New(be, repository.Options{
Compression: opts.Compression,
PackSize: opts.PackSize * 1024 * 1024,
Compression: opts.Compression,
PackSize: opts.PackSize * 1024 * 1024,
NoExtraVerify: opts.NoExtraVerify,
})
if err != nil {
return nil, errors.Fatal(err.Error())
@@ -553,7 +556,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
cfg := loc.Config
if cfg, ok := cfg.(restic.ApplyEnvironmenter); ok {
if cfg, ok := cfg.(backend.ApplyEnvironmenter); ok {
cfg.ApplyEnvironment("")
}
@@ -568,14 +571,14 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
}
// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}
var be restic.Backend
var be backend.Backend
cfg, err := parseConfig(loc, opts)
if err != nil {
@@ -613,7 +616,7 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
// check if config is there
fi, err := be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s))
}
@@ -626,7 +629,7 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
// Create the backend specified by URI.
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {


@@ -12,6 +12,7 @@ import (
"sync"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/retry"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
@@ -123,9 +124,8 @@ func directoriesContentsDiff(dir1, dir2 string) string {
fmt.Fprintf(&out, "+%v\n", b.path)
b = nil
continue
} else {
fmt.Fprintf(&out, "%%%v\n", a.path)
}
fmt.Fprintf(&out, "%%%v\n", a.path)
}
a, b = nil, nil
@@ -205,7 +205,7 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
extended: make(options.Options),
// replace this hook with "nil" if listing a filetype more than once is necessary
backendTestHook: func(r restic.Backend) (restic.Backend, error) { return newOrderedListOnceBackend(r), nil },
backendTestHook: func(r backend.Backend) (backend.Backend, error) { return newOrderedListOnceBackend(r), nil },
// start with default set of backends
backends: globalOptions.backends,
}
@@ -249,7 +249,7 @@ func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
rtest.OK(t, err)
for id := range remove {
rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
}
}
@@ -272,7 +272,7 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem
if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
return nil
}
return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})
}))
}


@@ -8,6 +8,7 @@ import (
"path/filepath"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
@@ -41,12 +42,12 @@ func TestCheckRestoreNoLock(t *testing.T) {
// backends (like e.g. Amazon S3) as the second listing may be inconsistent to what
// is expected by the first listing + some operations.
type listOnceBackend struct {
restic.Backend
backend.Backend
listedFileType map[restic.FileType]bool
strictOrder bool
}
func newListOnceBackend(be restic.Backend) *listOnceBackend {
func newListOnceBackend(be backend.Backend) *listOnceBackend {
return &listOnceBackend{
Backend: be,
listedFileType: make(map[restic.FileType]bool),
@@ -54,7 +55,7 @@ func newListOnceBackend(be restic.Backend) *listOnceBackend {
}
}
func newOrderedListOnceBackend(be restic.Backend) *listOnceBackend {
func newOrderedListOnceBackend(be backend.Backend) *listOnceBackend {
return &listOnceBackend{
Backend: be,
listedFileType: make(map[restic.FileType]bool),
@@ -62,7 +63,7 @@ func newOrderedListOnceBackend(be restic.Backend) *listOnceBackend {
}
}
func (be *listOnceBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
func (be *listOnceBackend) List(ctx context.Context, t restic.FileType, fn func(backend.FileInfo) error) error {
if t != restic.LockFile && be.listedFileType[t] {
return errors.Errorf("tried listing type %v the second time", t)
}
@@ -77,7 +78,7 @@ func TestListOnce(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
return newListOnceBackend(r), nil
}
pruneOpts := PruneOptions{MaxUnused: "0"}
@@ -104,10 +105,10 @@ func (r *writeToOnly) WriteTo(w io.Writer) (int64, error) {
}
type onlyLoadWithWriteToBackend struct {
restic.Backend
backend.Backend
}
func (be *onlyLoadWithWriteToBackend) Load(ctx context.Context, h restic.Handle,
func (be *onlyLoadWithWriteToBackend) Load(ctx context.Context, h backend.Handle,
length int, offset int64, fn func(rd io.Reader) error) error {
return be.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
@@ -120,7 +121,7 @@ func TestBackendLoadWriteTo(t *testing.T) {
defer cleanup()
// setup backend which only works if it's WriteTo method is correctly propagated upwards
env.gopts.backendInnerTestHook = func(r restic.Backend) (restic.Backend, error) {
env.gopts.backendInnerTestHook = func(r backend.Backend) (backend.Backend, error) {
return &onlyLoadWithWriteToBackend{Backend: r}, nil
}
@@ -140,7 +141,7 @@ func TestFindListOnce(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
return newListOnceBackend(r), nil
}
@@ -158,7 +159,7 @@ func TestFindListOnce(t *testing.T) {
snapshotIDs := restic.NewIDSet()
// specify the two oldest snapshots explicitly and use "latest" to reference the newest one
for sn := range FindFilteredSnapshots(context.TODO(), repo.Backend(), repo, &restic.SnapshotFilter{}, []string{
for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{
secondSnapshot[0].String(),
secondSnapshot[1].String()[:8],
"latest",

View File

@@ -6,6 +6,7 @@ import (
"sync"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -131,7 +132,7 @@ type refreshLockRequest struct {
result chan bool
}
func refreshLocks(ctx context.Context, backend restic.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
debug.Log("start")
lock := lockInfo.lock
ticker := time.NewTicker(refreshInterval)
@@ -257,8 +258,8 @@ func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-
}
}
func tryRefreshStaleLock(ctx context.Context, backend restic.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
freeze := restic.AsBackend[restic.FreezeBackend](backend)
func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
freeze := backend.AsBackend[backend.FreezeBackend](be)
if freeze != nil {
debug.Log("freezing backend")
freeze.Freeze()


@@ -9,6 +9,7 @@ import (
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/debug"
@@ -104,11 +105,11 @@ func TestLockConflict(t *testing.T) {
}
type writeOnceBackend struct {
restic.Backend
backend.Backend
written bool
}
func (b *writeOnceBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
if b.written {
return fmt.Errorf("fail after first write")
}
@@ -117,7 +118,7 @@ func (b *writeOnceBackend) Save(ctx context.Context, h restic.Handle, rd restic.
}
func TestLockFailedRefresh(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &writeOnceBackend{Backend: r}, nil
})
defer cleanup()
@@ -143,11 +144,11 @@ func TestLockFailedRefresh(t *testing.T) {
}
type loggingBackend struct {
restic.Backend
backend.Backend
t *testing.T
}
func (b *loggingBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
b.t.Logf("save %v @ %v", h, time.Now())
err := b.Backend.Save(ctx, h, rd)
b.t.Logf("save finished %v @ %v", h, time.Now())
@@ -155,7 +156,7 @@ func (b *loggingBackend) Save(ctx context.Context, h restic.Handle, rd restic.Re
}
func TestLockSuccessfulRefresh(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &loggingBackend{
Backend: r,
t: t,
@@ -193,12 +194,12 @@ func TestLockSuccessfulRefresh(t *testing.T) {
}
type slowBackend struct {
restic.Backend
backend.Backend
m sync.Mutex
sleep time.Duration
}
func (b *slowBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
b.m.Lock()
sleep := b.sleep
b.m.Unlock()
@@ -208,7 +209,7 @@ func (b *slowBackend) Save(ctx context.Context, h restic.Handle, rd restic.Rewin
func TestLockSuccessfulStaleRefresh(t *testing.T) {
var sb *slowBackend
repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
sb = &slowBackend{Backend: r}
return sb, nil
})


@@ -37,7 +37,7 @@ The full documentation can be found at https://restic.readthedocs.io/ .
SilenceUsage: true,
DisableAutoGenTag: true,
PersistentPreRunE: func(c *cobra.Command, args []string) error {
PersistentPreRunE: func(c *cobra.Command, _ []string) error {
// set verbosity, default is one
globalOptions.verbosity = 1
if globalOptions.Quiet && globalOptions.Verbose > 0 {

Some files were not shown because too many files have changed in this diff.