mirror of https://github.com/restic/restic.git, synced 2025-12-13 12:02:59 +00:00
Merge pull request #2630 from MichaelEischer/fix-staticcheck
Fix lots of small issues reported by staticcheck
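The hunks below are small mechanical cleanups of the kind staticcheck flags: sort.Sort(sort.StringSlice(x)) becomes sort.Strings(x), bytes.Compare(a, b) != 0 becomes !bytes.Equal(a, b), string(buf.Bytes()) becomes buf.String(), fmt.Fprintf with a non-constant format string becomes fmt.Fprint, the error value walker.SkipNode is renamed walker.ErrSkipNode, and redundant variables, returns, and if/return-true tails are dropped. As a rough, self-contained sketch of those before/after idioms (not restic code; every name below is invented for illustration):

// Rough sketch of the preferred idioms this PR switches to; all names are
// invented for illustration and are not taken from restic.
package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	// sort.Strings replaces sort.Sort(sort.StringSlice(names)).
	names := []string{"b", "c", "a"}
	sort.Strings(names)

	// bytes.Equal replaces bytes.Compare(a, b) == 0 / != 0 comparisons.
	header := []byte("expected signature")
	buf := []byte("expected signature")
	if !bytes.Equal(buf, header) {
		fmt.Println("signature mismatch")
	}

	// buf.String() replaces string(buf.Bytes()), and fmt.Fprint replaces
	// fmt.Fprintf when the argument is data rather than a format string.
	var out bytes.Buffer
	fmt.Fprint(&out, "literal data, not a format string")
	fmt.Println(names, out.String())
}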
@@ -196,7 +196,7 @@ func uniqueNodeNames(tree1, tree2 *restic.Tree) (tree1Nodes, tree2Nodes map[stri
 		uniqueNames = append(uniqueNames, name)
 	}

-	sort.Sort(sort.StringSlice(uniqueNames))
+	sort.Strings(uniqueNames)
 	return tree1Nodes, tree2Nodes, uniqueNames
 }

@@ -270,7 +270,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error

 			Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())

-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}

 		if node == nil {
@@ -314,7 +314,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error

 			if !childMayMatch {
 				ignoreIfNoMatch = true
-				errIfNoMatch = walker.SkipNode
+				errIfNoMatch = walker.ErrSkipNode
 			} else {
 				ignoreIfNoMatch = false
 			}
@@ -354,7 +354,7 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {

 			Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())

-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}

 		if node == nil {
@@ -222,7 +222,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
 		// otherwise, signal the walker to not walk recursively into any
 		// subdirs
 		if node.Type == "dir" {
-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}
 		return false, nil
 	})
@@ -251,9 +251,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
 // Prints nothing, if we did not group at all.
 func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
 	var key restic.SnapshotGroupKey
-	var err error

-	err = json.Unmarshal([]byte(groupKeyJSON), &key)
+	err := json.Unmarshal([]byte(groupKeyJSON), &key)
 	if err != nil {
 		return err
 	}
@@ -12,7 +12,7 @@ func DeleteFiles(gopts GlobalOptions, repo restic.Repository, fileList restic.ID
 	deleteFiles(gopts, true, repo, fileList, fileType)
 }

-// DeleteFiles deletes the given fileList of fileType in parallel
+// DeleteFilesChecked deletes the given fileList of fileType in parallel
 // if an error occurs, it will cancel and return this error
 func DeleteFilesChecked(gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
 	return deleteFiles(gopts, false, repo, fileList, fileType)
@@ -190,7 +190,7 @@ func isDirExcludedByFile(dir, tagFilename, header string) bool {
 		Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
 		return false
 	}
-	if bytes.Compare(buf, []byte(header)) != 0 {
+	if !bytes.Equal(buf, []byte(header)) {
 		Warnf("invalid signature in exclusion tagfile %q\n", tf)
 		return false
 	}
@@ -55,7 +55,7 @@ func walkDir(dir string) <-chan *dirEntry {
 	}()

 	// first element is root
-	_ = <-ch
+	<-ch

 	return ch
 }
@@ -151,7 +151,7 @@ func testRunCheckOutput(gopts GlobalOptions) (string, error) {
 	}

 	err := runCheck(opts, gopts, nil)
-	return string(buf.Bytes()), err
+	return buf.String(), err
 }

 func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
@@ -177,7 +177,7 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {

 	rtest.OK(t, runLs(opts, gopts, []string{snapshotID}))

-	return strings.Split(string(buf.Bytes()), "\n")
+	return strings.Split(buf.String(), "\n")
 }

 func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
@@ -253,7 +253,6 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
 		"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
 	rtest.Assert(t, len(forgets[0].Remove) == 2,
 		"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
-	return
 }

 func testRunPrune(t testing.TB, gopts GlobalOptions) {
@@ -450,7 +449,7 @@ func TestBackupExclude(t *testing.T) {
 		f, err := os.Create(fp)
 		rtest.OK(t, err)

-		fmt.Fprintf(f, filename)
+		fmt.Fprint(f, filename)
 		rtest.OK(t, f.Close())
 	}

@@ -1105,14 +1104,14 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
 	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})

 	f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
-	fi, err := os.Stat(f1)
+	_, err := os.Stat(f1)
 	rtest.OK(t, err)

 	// restore with filter "*", this should restore meta data on everything.
 	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})

 	f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
-	fi, err = os.Stat(f2)
+	fi, err := os.Stat(f2)
 	rtest.OK(t, err)

 	rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
@@ -1417,11 +1416,7 @@ func linksEqual(source, dest map[uint64][]string) bool {
 		}
 	}

-	if len(dest) != 0 {
-		return false
-	}
-
-	return true
+	return len(dest) == 0
 }

 func linkEqual(source, dest []string) bool {