adapt workers based on whether an operation is CPU or IO-bound

Use runtime.GOMAXPROCS(0) as worker count for CPU-bound tasks,
repo.Connections() for IO-bound tasks and a combination if a task can be
both. Streaming packs is treated as IO-bound as adding more workers
cannot provide a speedup.

Typical IO-bound tasks are downloading / uploading / deleting files.
Decoding / Encoding / Verifying are usually CPU-bound. Several tasks are
a combination of both, e.g. for combined download and decode functions.
In the latter case add both limits together. As the backends have their
own concurrency limits restic still won't download more than
repo.Connections() files in parallel, but the additional workers can
decode already downloaded data in parallel.
This commit is contained in:
Michael Eischer
2021-08-08 00:38:17 +02:00
parent cd50feb66f
commit 6f53ecc1ae
16 changed files with 66 additions and 40 deletions

View File

@@ -20,8 +20,6 @@ import (
// con: each worker needs to keep one pack in memory
const (
workerCount = 8
largeFileBlobCount = 25
)
@@ -51,6 +49,7 @@ type fileRestorer struct {
idx func(restic.BlobHandle) []restic.PackedBlob
packLoader repository.BackendLoadFn
workerCount int
filesWriter *filesWriter
dst string
@@ -61,13 +60,18 @@ type fileRestorer struct {
func newFileRestorer(dst string,
packLoader repository.BackendLoadFn,
key *crypto.Key,
idx func(restic.BlobHandle) []restic.PackedBlob) *fileRestorer {
idx func(restic.BlobHandle) []restic.PackedBlob,
connections uint) *fileRestorer {
// as packs are streamed the concurrency is limited by IO
workerCount := int(connections)
return &fileRestorer{
key: key,
idx: idx,
packLoader: packLoader,
filesWriter: newFilesWriter(workerCount),
workerCount: workerCount,
dst: dst,
Error: restorerAbortOnAllErrors,
}
@@ -150,7 +154,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
}
return nil
}
for i := 0; i < workerCount; i++ {
for i := 0; i < r.workerCount; i++ {
wg.Go(worker)
}