Fix typos

Andrea Gelmini
2023-12-06 13:11:55 +01:00
parent b72de5a883
commit 241916d55b
45 changed files with 67 additions and 67 deletions

@@ -267,7 +267,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi
// FutureNode holds a reference to a channel that returns a FutureNodeResult
// or a reference to an already existing result. If the result is available
// immediatelly, then storing a reference directly requires less memory than
// immediately, then storing a reference directly requires less memory than
// using the indirection via a channel.
type FutureNode struct {
ch <-chan futureNodeResult
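
To make the memory argument concrete, here is a minimal sketch of the pattern this comment describes; everything except the ch field is a hypothetical stand-in, not restic's actual code:

type futureNodeResult struct {
	err error // placeholder payload; the real struct carries more
}

type futureNodeSketch struct {
	ch  <-chan futureNodeResult // pending result arrives here
	res *futureNodeResult       // result that was available immediately
}

// take returns the result, blocking on the channel only when the
// result was not stored directly.
func (f *futureNodeSketch) take() futureNodeResult {
	if f.res != nil {
		return *f.res // no channel or goroutine kept alive
	}
	return <-f.ch
}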

@@ -31,7 +31,7 @@ type b2Backend struct {
canDelete bool
}
// Billing happens in 1000 item granlarity, but we are more interested in reducing the number of network round trips
// Billing happens in 1000 item granularity, but we are more interested in reducing the number of network round trips
const defaultListMaxItems = 10 * 1000
// ensure statically that *b2Backend implements backend.Backend.
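
The comment above refers to Go's standard compile-time assertion, which the hunk cuts off; it presumably looks like this sketch:

var _ backend.Backend = &b2Backend{} // compilation fails if a method is missing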

@@ -18,7 +18,7 @@ type Backend interface {
// repository.
Location() string
// Connections returns the maxmimum number of concurrent backend operations.
// Connections returns the maximum number of concurrent backend operations.
Connections() uint
// Hasher may return a hash function for calculating a content hash for the backend

@@ -5,8 +5,8 @@ import (
"net/http"
)
// Limiter defines an interface that implementors can use to rate limit I/O
// according to some policy defined and configured by the implementor.
// Limiter defines an interface that implementers can use to rate limit I/O
// according to some policy defined and configured by the implementer.
type Limiter interface {
// Upstream returns a rate limited reader that is intended to be used in
// uploads.
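
As an illustration of the interface's intent (not restic's own limiter implementation), an implementer could back Upstream with a token bucket from golang.org/x/time/rate:

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

type bucketLimiter struct{ lim *rate.Limiter }

// Upstream wraps rd so that reads are throttled by the bucket.
func (b *bucketLimiter) Upstream(rd io.Reader) io.Reader {
	return &throttledReader{rd: rd, lim: b.lim}
}

type throttledReader struct {
	rd  io.Reader
	lim *rate.Limiter
}

func (t *throttledReader) Read(p []byte) (int, error) {
	n, err := t.rd.Read(p)
	if n > 0 {
		// block until the bucket grants n bytes; n must stay below the burst
		_ = t.lim.WaitN(context.Background(), n)
	}
	return n, err
}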

@@ -194,7 +194,7 @@ func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReade
}
}
// try to mark file as read-only to avoid accidential modifications
// try to mark file as read-only to avoid accidental modifications
// ignore if the operation fails as some filesystems don't allow the chmod call
// e.g. exfat and network file systems with certain mount options
err = setFileReadonly(finalname, b.Modes.File)
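
setFileReadonly is restic's helper; a plausible stand-in for the best-effort behaviour the comment describes (an assumption, not the actual implementation):

import "os"

// clear every write bit and let the caller ignore failures, since
// filesystems such as exfat reject chmod outright
func setFileReadonlySketch(name string, mode os.FileMode) error {
	return os.Chmod(name, mode&^0o222)
}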

@@ -302,7 +302,7 @@ func Join(parts ...string) string {
}
// tempSuffix generates a random string suffix that should be sufficiently long
// to avoid accidential conflicts
// to avoid accidental conflicts
func tempSuffix() string {
var nonce [16]byte
_, err := rand.Read(nonce[:])
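
The hunk cuts off after the random read; a plausible completion, assuming a hex-encoded suffix (the encoding choice is a guess):

import (
	"crypto/rand"
	"encoding/hex"
)

func tempSuffixSketch() string {
	var nonce [16]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err) // the system RNG failing is not recoverable here
	}
	return hex.EncodeToString(nonce[:]) // 32-character suffix
}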

@@ -6,7 +6,7 @@ import (
"github.com/restic/restic/internal/errors"
)
// shellSplitter splits a command string into separater arguments. It supports
// shellSplitter splits a command string into separated arguments. It supports
// single and double quoted strings.
type shellSplitter struct {
quote rune
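
The struct is truncated above; a compact quote-aware splitter in the same spirit (hypothetical code, not restic's actual implementation):

import "fmt"

func splitArgsSketch(s string) ([]string, error) {
	var args []string
	var cur []rune
	var quote rune // active quote character, 0 while unquoted
	for _, r := range s {
		switch {
		case quote != 0: // inside '...' or "..."
			if r == quote {
				quote = 0
			} else {
				cur = append(cur, r)
			}
		case r == '\'' || r == '"':
			quote = r
		case r == ' ' || r == '\t':
			if len(cur) > 0 {
				args = append(args, string(cur))
				cur = cur[:0]
			}
		default:
			cur = append(cur, r)
		}
	}
	if quote != 0 {
		return nil, fmt.Errorf("missing closing quote %c", quote)
	}
	if len(cur) > 0 {
		args = append(args, string(cur))
	}
	return args, nil
}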

@@ -11,7 +11,7 @@ import (
)
func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
// run the command in it's own process group so that SIGINT
// run the command in its own process group so that SIGINT
// is not sent to it.
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
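
A self-contained sketch of the process-group technique (Unix-specific; restic's real startForeground also manages terminal state, which is omitted here):

import (
	"os/exec"
	"syscall"
)

func startForegroundSketch(cmd *exec.Cmd) (bg func() error, err error) {
	// a fresh process group means a terminal SIGINT, which goes to our
	// group, never reaches the child
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return func() error { return nil }, nil // bg is a no-op in this sketch
}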

@@ -442,7 +442,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
}
// Note that we do not use the blob size. The "obvious" check
// whether the sum of the blob sizes matches the file size
// unfortunately fails in some cases that are not resolveable
// unfortunately fails in some cases that are not resolvable
// by users, so we omit this check, see #1887
_, found := c.repo.LookupBlobSize(blobID, restic.DataBlob)

@@ -166,7 +166,7 @@ func (h HRESULT) Str() string {
return "UNKNOWN"
}
// VssError encapsulates errors retruned from calling VSS api.
// VssError encapsulates errors returned from calling VSS api.
type vssError struct {
text string
hresult HRESULT
@@ -190,7 +190,7 @@ func (e *vssError) Error() string {
return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult)
}
// VssError encapsulates errors retruned from calling VSS api.
// VssError encapsulates errors returned from calling VSS api.
type vssTextError struct {
text string
}
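
For symmetry with vssError's Error method above, vssTextError plausibly implements error along these lines (a guess from the fields shown, not verified against the source):

func (e *vssTextError) Error() string {
	return fmt.Sprintf("VSS error: %s", e.text)
}
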
@@ -615,7 +615,7 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) {
return HRESULT(result), state
}
// WaitUntilAsyncFinished waits until either the async call is finshed or
// WaitUntilAsyncFinished waits until either the async call is finished or
// the given timeout is reached.
func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error {
hresult := vssAsync.Wait(millis)
@@ -858,7 +858,7 @@ func NewVssSnapshot(
if err != nil {
// After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS
// instance for proper cleanup.
// It is not neccessary to call BackupComplete before releasing the VSS instance afterwards.
// It is not necessary to call BackupComplete before releasing the VSS instance afterwards.
iVssBackupComponents.AbortBackup()
iVssBackupComponents.Release()
return VssSnapshot{}, err

@@ -46,7 +46,7 @@ func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, err
}, nil
}
// returing a wrapped context.Canceled error will instead result in returing
// returning a wrapped context.Canceled error will instead result in returning
// an input / output error to the user. Thus unwrap the error to match the
// expectations of bazil/fuse
func unwrapCtxCanceled(err error) error {
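
The function body is cut off; given the comment, the obvious completion is a sketch like this:

import (
	"context"
	"errors"
)

func unwrapCtxCanceledSketch(err error) error {
	if errors.Is(err, context.Canceled) {
		return context.Canceled // hand bazil/fuse the bare sentinel value
	}
	return err
}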

@@ -142,7 +142,7 @@ func (f *openFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.R
// Multiple goroutines may call service methods simultaneously;
// the methods being called are responsible for appropriate synchronization.
//
// However, no lock needed here as getBlobAt can be called conurrently
// However, no lock needed here as getBlobAt can be called concurrently
// (blobCache has its own locking)
for i := startContent; remainingBytes > 0 && i < len(f.cumsize)-1; i++ {
blob, err := f.getBlobAt(ctx, i)
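
The loop is truncated above; a hedged, self-contained sketch of reading a range that spans several blobs via a cumulative-size table (types simplified relative to restic's):

import "context"

func readRangeSketch(ctx context.Context, buf []byte, offset, startContent int,
	cumsize []int, getBlobAt func(context.Context, int) ([]byte, error)) error {
	remaining := len(buf)
	for i := startContent; remaining > 0 && i < len(cumsize)-1; i++ {
		blob, err := getBlobAt(ctx, i) // concurrency-safe per the comment
		if err != nil {
			return err
		}
		n := copy(buf, blob[offset-cumsize[i]:]) // cumsize[i] = start offset of blob i
		buf = buf[n:]
		offset += n
		remaining -= n
	}
	return nil
}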

@@ -25,7 +25,7 @@ type MasterIndex struct {
func NewMasterIndex() *MasterIndex {
// Always add an empty final index, such that MergeFinalIndexes can merge into this.
// Note that removing this index could lead to a race condition in the rare
// sitation that only two indexes exist which are saved and merged concurrently.
// situation that only two indexes exist which are saved and merged concurrently.
idx := []*Index{NewIndex()}
idx[0].Finalize()
return &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()}

@@ -189,7 +189,7 @@ const (
// MaxHeaderSize is the max size of header including header-length field
MaxHeaderSize = 16*1024*1024 + headerLengthSize
// number of header enries to download as part of header-length request
// number of header entries to download as part of header-length request
eagerEntries = 15
)

@@ -39,7 +39,7 @@ type packerManager struct {
packSize uint
}
// newPackerManager returns an new packer manager which writes temporary files
// newPackerManager returns a new packer manager which writes temporary files
// to a temporary directory
func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, queueFn func(ctx context.Context, t restic.BlobType, p *Packer) error) *packerManager {
return &packerManager{

@@ -83,7 +83,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
}
// selectBlobs splits the list of all blobs randomly into two lists. A blob
// will be contained in the firstone ith probability p.
// will be contained in the firstone with probability p.
func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
list1 = restic.NewBlobSet()
list2 = restic.NewBlobSet()
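
The function is truncated; the probabilistic split it describes boils down to something like this (the rand call and the allBlobs iteration are illustrative):

for _, bh := range allBlobs { // allBlobs: hypothetical pre-collected handles
	if rand.Float32() <= p { // math/rand
		list1.Insert(bh)
	} else {
		list2.Insert(bh)
	}
}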

@@ -932,7 +932,7 @@ func streamPackPart(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key,
ctx, cancel := context.WithCancel(ctx)
// stream blobs in pack
err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error {
// prevent callbacks after cancelation
// prevent callbacks after cancellation
if ctx.Err() != nil {
return ctx.Err()
}

@@ -523,7 +523,7 @@ func testStreamPack(t *testing.T, version uint) {
case 2:
compress = true
default:
t.Fatal("test does not suport repository version", version)
t.Fatal("test does not support repository version", version)
}
packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)

@@ -13,7 +13,7 @@ func TestCountedBlobSet(t *testing.T) {
test.Equals(t, bs.List(), restic.BlobHandles{})
bh := restic.NewRandomBlobHandle()
// check non existant
// check non existent
test.Equals(t, bs.Has(bh), false)
// test insert

@@ -38,7 +38,7 @@ func TestGroupByOptions(t *testing.T) {
var opts restic.SnapshotGroupByOptions
test.OK(t, opts.Set(exp.from))
if !cmp.Equal(opts, exp.opts) {
t.Errorf("unexpeted opts %s", cmp.Diff(opts, exp.opts))
t.Errorf("unexpected opts %s", cmp.Diff(opts, exp.opts))
}
test.Equals(t, opts.String(), exp.normalized)
}

@@ -296,7 +296,7 @@ func testPartialDownloadError(t *testing.T, part int) {
// loader always returns an error
loader := repo.loader
repo.loader = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
// only load partial data to execise fault handling in different places
// only load partial data to exercise fault handling in different places
err := loader(ctx, h, length*part/100, offset, fn)
if err == nil {
return nil

@@ -22,7 +22,7 @@ func NewHardlinkIndex[T any]() *HardlinkIndex[T] {
}
}
// Has checks wether the link already exist in the index.
// Has checks whether the link already exist in the index.
func (idx *HardlinkIndex[T]) Has(inode uint64, device uint64) bool {
idx.m.Lock()
defer idx.m.Unlock()
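
Completing the truncated method with a plausible map lookup; the key struct is an assumption consistent with the generic type:

import "sync"

type hardlinkKey struct{ inode, device uint64 }

type hardlinkIndexSketch[T any] struct {
	m     sync.Mutex
	index map[hardlinkKey]T
}

func (idx *hardlinkIndexSketch[T]) Has(inode, device uint64) bool {
	idx.m.Lock()
	defer idx.m.Unlock()
	_, ok := idx.index[hardlinkKey{inode, device}]
	return ok
}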

@@ -791,7 +791,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) {
}
}
// VerifyFiles must not report cancelation of its context through res.Error.
// VerifyFiles must not report cancellation of its context through res.Error.
func TestVerifyCancel(t *testing.T) {
snapshot := Snapshot{
Nodes: map[string]Node{

@@ -325,7 +325,7 @@ func Truncate(s string, w int) string {
// Guess whether the first rune in s would occupy two terminal cells
// instead of one. This cannot be determined exactly without knowing
// the terminal font, so we treat all ambigous runes as full-width,
// the terminal font, so we treat all ambiguous runes as full-width,
// i.e., two cells.
func wideRune(s string) (wide bool, utfsize uint) {
prop, size := width.LookupString(s)
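
The body is cut off after the lookup; with golang.org/x/text/width, the rule stated in the comment (ambiguous counts as full-width) completes roughly as:

import "golang.org/x/text/width"

func wideRuneSketch(s string) (wide bool, utfsize uint) {
	prop, size := width.LookupString(s)
	k := prop.Kind()
	wide = k == width.EastAsianWide || k == width.EastAsianFullwidth ||
		k == width.EastAsianAmbiguous // ambiguous runes occupy two cells
	return wide, uint(size)
}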

@@ -69,7 +69,7 @@ func checkRewriteItemOrder(want []string) checkRewriteFunc {
}
}
// checkRewriteSkips excludes nodes if path is in skipFor, it checks that rewriting proceedes in the correct order.
// checkRewriteSkips excludes nodes if path is in skipFor, it checks that rewriting proceeds in the correct order.
func checkRewriteSkips(skipFor map[string]struct{}, want []string, disableCache bool) checkRewriteFunc {
var pos int