2023-01-27 21:37:20 +00:00
|
|
|
// Copyright (c) Tailscale Inc & AUTHORS
|
|
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
util/testingutil: new package with MinAllocsPerRun
testing.AllocsPerRun measures the total allocations performed
by the entire program while repeatedly executing a function f.
If some unrelated part of the rest of the program happens to
allocate a lot during that period, you end up with a test failure.
Ideally, the rest of the program would be silent while
testing.AllocsPerRun executes.
Realistically, that is often unachievable.
AllocsPerRun attempts to mitigate this by setting GOMAXPROCS to 1,
but that doesn't prevent other code from running;
it only makes it less likely.
You can also mitigate this by passing a large iteration count to
AllocsPerRun, but that is unreliable and needlessly expensive.
Unlike most of package testing, AllocsPerRun doesn't use any
toolchain magic, so we can just write a replacement.
One wild idea is to change how we count mallocs.
Instead of using runtime.MemStats, turn on memory profiling with a
memprofilerate of 1. Discard all samples from the profile whose stack
does not contain testing.AllocsPerRun. Count the remaining samples to
determine the number of mallocs.
That's fun, but overkill.
Instead, this change adds a simple API that attempts to get f to
run at least once with a target number of allocations.
This is useful when you know that f should allocate consistently.
We can then assume that any iterations with too many allocations
are probably due to one-time costs or background noise.
This suits most uses of AllocsPerRun.
Ratcheting tests tend to be significantly less flaky,
because they are biased towards success.
They can also be faster, because they can exit early,
once success has been reached.
Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
2021-10-27 23:00:32 +00:00
|
|
|
|
|
|
|
package tstest
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"runtime"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
|
|
|
// MinAllocsPerRun asserts that f can run with no more than target allocations.
// It runs f up to 1000 times or 5s, whichever happens first.
// If f has executed more than target allocations on every run, it returns a non-nil error.
//
// MinAllocsPerRun sets GOMAXPROCS to 1 during its measurement and restores
// it before returning.
func MinAllocsPerRun(t *testing.T, target uint64, f func()) error {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	const (
		maxRuns    = 1000
		maxElapsed = 5 * time.Second
	)
	var (
		ms    runtime.MemStats
		lo    uint64 // fewest allocations observed in a single run (0 = unset)
		hi    uint64 // most allocations observed in a single run
		total uint64 // allocations summed over all runs, for the average
		runs  int
	)
	deadline := time.Now().Add(maxElapsed)
	for {
		runtime.ReadMemStats(&ms)
		before := ms.Mallocs
		f()
		runtime.ReadMemStats(&ms)
		n := ms.Mallocs - before
		// TODO: if n < target, return an error? See discussion in #3204.
		if n <= target {
			// Success: at least one run stayed within budget.
			return nil
		}
		if lo == 0 || n < lo {
			lo = n
		}
		if n > hi {
			hi = n
		}
		total += n
		runs++
		if runs == maxRuns || time.Now().After(deadline) {
			break
		}
	}

	return fmt.Errorf("min allocs = %d, max allocs = %d, avg allocs/run = %f, want run with <= %d allocs", lo, hi, float64(total)/float64(runs), target)
}
|