tstest/integration: build binaries only once
The existing code relied on the Go build cache to avoid needless work when obtaining the tailscale binaries. For non-obvious reasons, the binaries were getting re-linked every time, which added 600ms or so on my machine to every test. Instead, build the binaries exactly once, on demand. This reduces the time to run 'go test -count=5' from 34s to 10s on my machine.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
commit 4691e012a9
parent e133bb570b
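In short, the diff below replaces the per-test BuildTestBinaries helper with package-level accessors (BinaryDir, TailscaleBinary, TailscaledBinary) that memoize a single build behind a sync.Once, plus a CleanupBinaries hook for TestMain. A condensed sketch of that pattern follows; it is not the full change — it elides the GOROOT-based go lookup, the -race handling, and the per-target 'go build' fallback for cross-compilation, and simply runs 'go' from PATH:

	package integration

	import (
		"fmt"
		"io/ioutil"
		"os"
		"os/exec"
		"sync"
		"testing"
	)

	var (
		buildOnce sync.Once // guards the single build per test process
		buildErr  error     // error from that build, reported to every caller
		binDir    string    // temp dir holding the tailscale and tailscaled binaries
	)

	// BinaryDir builds the binaries at most once and returns the directory holding them.
	func BinaryDir(tb testing.TB) string {
		buildOnce.Do(func() { binDir, buildErr = buildTestBinaries() })
		if buildErr != nil {
			tb.Fatal(buildErr)
		}
		return binDir
	}

	// CleanupBinaries removes the shared directory; call it from TestMain after m.Run.
	func CleanupBinaries() {
		buildOnce.Do(func() {}) // make sure no build starts after cleanup begins
		if binDir != "" {
			os.RemoveAll(binDir)
		}
	}

	func buildTestBinaries() (string, error) {
		dir, err := ioutil.TempDir("", "")
		if err != nil {
			return "", err
		}
		// A single 'go install' into GOBIN=dir links both binaries once;
		// every later test reuses them instead of re-linking.
		cmd := exec.Command("go", "install", "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
		cmd.Env = append(os.Environ(), "GOBIN="+dir)
		if out, err := cmd.CombinedOutput(); err != nil {
			os.RemoveAll(dir)
			return "", fmt.Errorf("go install: %v: %s", err, out)
		}
		return dir, nil
	}

Tests then call TailscaleBinary(t) / TailscaledBinary(t) instead of passing a *Binaries value around, and each test package's TestMain calls CleanupBinaries after m.Run.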
@@ -43,38 +43,68 @@
 	"tailscale.com/version"
 )
 
-// Binaries are the paths to a tailscaled and tailscale binary.
-// These can be shared by multiple nodes.
-type Binaries struct {
-	Dir    string // temp dir for tailscale & tailscaled
-	Daemon string // tailscaled
-	CLI    string // tailscale
-}
-
-// BuildTestBinaries builds tailscale and tailscaled, failing the test
-// if they fail to compile.
-func BuildTestBinaries(t testing.TB) *Binaries {
-	td := t.TempDir()
-	build(t, td, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
-	return &Binaries{
-		Dir:    td,
-		Daemon: filepath.Join(td, "tailscaled"+exe()),
-		CLI:    filepath.Join(td, "tailscale"+exe()),
+// CleanupBinaries cleans up any resources created by calls to BinaryDir, TailscaleBinary, or TailscaledBinary.
+// It should be called from TestMain after all tests have completed.
+func CleanupBinaries() {
+	buildOnce.Do(func() {})
+	if binDir != "" {
+		os.RemoveAll(binDir)
+	}
+}
 
-// buildMu limits our use of "go build" to one at a time, so we don't
-// fight Go's built-in caching trying to do the same build concurrently.
-var buildMu sync.Mutex
+// BinaryDir returns a directory containing test tailscale and tailscaled binaries.
+// If any test calls BinaryDir, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func BinaryDir(tb testing.TB) string {
+	buildOnce.Do(func() {
+		binDir, buildErr = buildTestBinaries()
+	})
+	if buildErr != nil {
+		tb.Fatal(buildErr)
+	}
+	return binDir
+}
 
-func build(t testing.TB, outDir string, targets ...string) {
-	buildMu.Lock()
-	defer buildMu.Unlock()
+// TailscaleBinary returns the path to the test tailscale binary.
+// If any test calls TailscaleBinary, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func TailscaleBinary(tb testing.TB) string {
+	return filepath.Join(BinaryDir(tb), "tailscale"+exe())
+}
 
-	t0 := time.Now()
-	defer func() { t.Logf("built %s in %v", targets, time.Since(t0).Round(time.Millisecond)) }()
+// TailscaledBinary returns the path to the test tailscaled binary.
+// If any test calls TailscaleBinary, there must be a TestMain function that calls
+// CleanupBinaries after all tests are complete.
+func TailscaledBinary(tb testing.TB) string {
+	return filepath.Join(BinaryDir(tb), "tailscaled"+exe())
+}
 
-	goBin := findGo(t)
+var (
+	buildOnce sync.Once
+	buildErr  error
+	binDir    string
+)
 
+// buildTestBinaries builds tailscale and tailscaled.
+// It returns the dir containing the binaries.
+func buildTestBinaries() (string, error) {
+	bindir, err := ioutil.TempDir("", "")
+	if err != nil {
+		return "", err
+	}
+	err = build(bindir, "tailscale.com/cmd/tailscaled", "tailscale.com/cmd/tailscale")
+	if err != nil {
+		os.RemoveAll(bindir)
+		return "", err
+	}
+	return bindir, nil
+}
+
+func build(outDir string, targets ...string) error {
+	goBin, err := findGo()
+	if err != nil {
+		return err
+	}
 	cmd := exec.Command(goBin, "install")
 	if version.IsRace() {
 		cmd.Args = append(cmd.Args, "-race")
@@ -83,7 +113,7 @@ func build(t testing.TB, outDir string, targets ...string) {
 	cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH, "GOBIN="+outDir)
 	errOut, err := cmd.CombinedOutput()
 	if err == nil {
-		return
+		return nil
 	}
 	if strings.Contains(string(errOut), "when GOBIN is set") {
 		// Fallback slow path for cross-compiled binaries.
@@ -92,25 +122,25 @@ func build(t testing.TB, outDir string, targets ...string) {
 			cmd := exec.Command(goBin, "build", "-o", outFile, target)
 			cmd.Env = append(os.Environ(), "GOARCH="+runtime.GOARCH)
 			if errOut, err := cmd.CombinedOutput(); err != nil {
-				t.Fatalf("failed to build %v with %v: %v, %s", target, goBin, err, errOut)
+				return fmt.Errorf("failed to build %v with %v: %v, %s", target, goBin, err, errOut)
 			}
 		}
-		return
+		return nil
 	}
-	t.Fatalf("failed to build %v with %v: %v, %s", targets, goBin, err, errOut)
+	return fmt.Errorf("failed to build %v with %v: %v, %s", targets, goBin, err, errOut)
 }
 
-func findGo(t testing.TB) string {
+func findGo() (string, error) {
 	goBin := filepath.Join(runtime.GOROOT(), "bin", "go"+exe())
 	if fi, err := os.Stat(goBin); err != nil {
 		if os.IsNotExist(err) {
-			t.Fatalf("failed to find go at %v", goBin)
+			return "", fmt.Errorf("failed to find go at %v", goBin)
		}
-		t.Fatalf("looking for go binary: %v", err)
+		return "", fmt.Errorf("looking for go binary: %v", err)
 	} else if !fi.Mode().IsRegular() {
-		t.Fatalf("%v is unexpected %v", goBin, fi.Mode())
+		return "", fmt.Errorf("%v is unexpected %v", goBin, fi.Mode())
 	}
-	return goBin
+	return goBin, nil
 }
 
 func exe() string {
@@ -52,6 +52,7 @@ func TestMain(m *testing.M) {
 	os.Setenv("TS_DISABLE_UPNP", "true")
 	flag.Parse()
 	v := m.Run()
+	CleanupBinaries()
 	if v != 0 {
 		os.Exit(v)
 	}
@@ -64,9 +65,7 @@ func TestMain(m *testing.M) {
 
 func TestOneNodeUpNoAuth(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 
 	d1 := n1.StartDaemon(t)
@@ -83,9 +82,7 @@ func TestOneNodeUpNoAuth(t *testing.T) {
 
 func TestOneNodeExpiredKey(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 
 	d1 := n1.StartDaemon(t)
@@ -121,12 +118,10 @@ func TestOneNodeExpiredKey(t *testing.T) {
 
 func TestCollectPanic(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n := newTestNode(t, env)
 
-	cmd := exec.Command(n.env.Binaries.Daemon, "--cleanup")
+	cmd := exec.Command(env.daemon, "--cleanup")
 	cmd.Env = append(os.Environ(),
 		"TS_PLEASE_PANIC=1",
 		"TS_LOG_TARGET="+n.env.LogCatcherServer.URL,
@@ -135,7 +130,7 @@ func TestCollectPanic(t *testing.T) {
 	t.Logf("initial run: %s", got)
 
 	// Now we run it again, and on start, it will upload the logs to logcatcher.
-	cmd = exec.Command(n.env.Binaries.Daemon, "--cleanup")
+	cmd = exec.Command(env.daemon, "--cleanup")
 	cmd.Env = append(os.Environ(), "TS_LOG_TARGET="+n.env.LogCatcherServer.URL)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Fatalf("cleanup failed: %v: %q", err, out)
@@ -154,9 +149,7 @@ func TestCollectPanic(t *testing.T) {
 // test Issue 2321: Start with UpdatePrefs should save prefs to disk
 func TestStateSavedOnStart(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 
 	d1 := n1.StartDaemon(t)
@@ -192,9 +185,7 @@ func TestStateSavedOnStart(t *testing.T) {
 
 func TestOneNodeUpAuth(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins, configureControl(func(control *testcontrol.Server) {
+	env := newTestEnv(t, configureControl(func(control *testcontrol.Server) {
 		control.RequireAuth = true
 	}))
 
@@ -237,9 +228,7 @@ func TestOneNodeUpAuth(t *testing.T) {
 
 func TestTwoNodes(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 
 	// Create two nodes:
 	n1 := newTestNode(t, env)
@@ -285,9 +274,7 @@ func TestTwoNodes(t *testing.T) {
 
 func TestNodeAddressIPFields(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 	d1 := n1.StartDaemon(t)
 
@@ -313,9 +300,7 @@ func TestNodeAddressIPFields(t *testing.T) {
 
 func TestAddPingRequest(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 	n1.StartDaemon(t)
 
@@ -369,9 +354,7 @@ func TestAddPingRequest(t *testing.T) {
 // be connected to control.
 func TestNoControlConnWhenDown(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 
 	d1 := n1.StartDaemon(t)
@@ -412,9 +395,7 @@ func TestNoControlConnWhenDown(t *testing.T) {
 // without the GUI to kick off a Start.
 func TestOneNodeUpWindowsStyle(t *testing.T) {
 	t.Parallel()
-	bins := BuildTestBinaries(t)
-
-	env := newTestEnv(t, bins)
+	env := newTestEnv(t)
 	n1 := newTestNode(t, env)
 	n1.upFlagGOOS = "windows"
 
@@ -431,8 +412,9 @@ func TestOneNodeUpWindowsStyle(t *testing.T) {
 // testEnv contains the test environment (set of servers) used by one
 // or more nodes.
 type testEnv struct {
-	t        testing.TB
-	Binaries *Binaries
+	t      testing.TB
+	cli    string
+	daemon string
 
 	LogCatcher       *LogCatcher
 	LogCatcherServer *httptest.Server
@@ -456,7 +438,7 @@ func (f configureControl) modifyTestEnv(te *testEnv) {
 
 // newTestEnv starts a bunch of services and returns a new test environment.
 // newTestEnv arranges for the environment's resources to be cleaned up on exit.
-func newTestEnv(t testing.TB, bins *Binaries, opts ...testEnvOpt) *testEnv {
+func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv {
 	if runtime.GOOS == "windows" {
 		t.Skip("not tested/working on Windows yet")
 	}
@@ -469,7 +451,8 @@ func newTestEnv(t testing.TB, bins *Binaries, opts ...testEnvOpt) *testEnv {
 	trafficTrap := new(trafficTrap)
 	e := &testEnv{
 		t:                t,
-		Binaries:         bins,
+		cli:              TailscaleBinary(t),
+		daemon:           TailscaledBinary(t),
 		LogCatcher:       logc,
 		LogCatcherServer: httptest.NewServer(logc),
 		Control:          control,
@@ -666,7 +649,7 @@ func (n *testNode) StartDaemon(t testing.TB) *Daemon {
 }
 
 func (n *testNode) StartDaemonAsIPNGOOS(t testing.TB, ipnGOOS string) *Daemon {
-	cmd := exec.Command(n.env.Binaries.Daemon,
+	cmd := exec.Command(n.env.daemon,
 		"--tun=userspace-networking",
 		"--state="+n.stateFile,
 		"--socket="+n.sockFile,
@@ -810,7 +793,7 @@ func (n *testNode) AwaitNeedsLogin(t testing.TB) {
 // Tailscale returns a command that runs the tailscale CLI with the provided arguments.
 // It does not start the process.
 func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
-	cmd := exec.Command(n.env.Binaries.CLI, "--socket="+n.sockFile)
+	cmd := exec.Command(n.env.cli, "--socket="+n.sockFile)
 	cmd.Args = append(cmd.Args, arg...)
 	cmd.Dir = n.dir
 	cmd.Env = append(os.Environ(),
@@ -35,7 +35,9 @@
 type Harness struct {
 	testerDialer proxy.Dialer
 	testerDir    string
-	bins         *integration.Binaries
+	binaryDir    string
+	cli          string
+	daemon       string
 	pubKey       string
 	signer       ssh.Signer
 	cs           *testcontrol.Server
@@ -134,11 +136,11 @@ func newHarness(t *testing.T) *Harness {
 	loginServer := fmt.Sprintf("http://%s", ln.Addr())
 	t.Logf("loginServer: %s", loginServer)
 
-	bins := integration.BuildTestBinaries(t)
-
 	h := &Harness{
 		pubKey:         string(pubkey),
-		bins:           bins,
+		binaryDir:      integration.BinaryDir(t),
+		cli:            integration.TailscaleBinary(t),
+		daemon:         integration.TailscaledBinary(t),
 		signer:         signer,
 		loginServerURL: loginServer,
 		cs:             cs,
@@ -146,7 +148,7 @@ func newHarness(t *testing.T) *Harness {
 		ipMap:          ipMap,
 	}
 
-	h.makeTestNode(t, bins, loginServer)
+	h.makeTestNode(t, loginServer)
 
 	return h
 }
@@ -156,7 +158,7 @@ func (h *Harness) Tailscale(t *testing.T, args ...string) []byte {
 
 	args = append([]string{"--socket=" + filepath.Join(h.testerDir, "sock")}, args...)
 
-	cmd := exec.Command(h.bins.CLI, args...)
+	cmd := exec.Command(h.cli, args...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Fatal(err)
@@ -169,7 +171,7 @@ func (h *Harness) Tailscale(t *testing.T, args ...string) []byte {
 // enables us to make connections to and from the tailscale network being
 // tested. This mutates the Harness to allow tests to dial into the tailscale
 // network as well as control the tester's tailscaled.
-func (h *Harness) makeTestNode(t *testing.T, bins *integration.Binaries, controlURL string) {
+func (h *Harness) makeTestNode(t *testing.T, controlURL string) {
 	dir := t.TempDir()
 	h.testerDir = dir
 
@@ -179,7 +181,7 @@ func (h *Harness) makeTestNode(t *testing.T, bins *integration.Binaries, controlURL string) {
 	}
 
 	cmd := exec.Command(
-		bins.Daemon,
+		h.daemon,
 		"--tun=userspace-networking",
 		"--state="+filepath.Join(dir, "state.json"),
 		"--socket="+filepath.Join(dir, "sock"),
@@ -222,7 +224,7 @@ func (h *Harness) makeTestNode(t *testing.T, bins *integration.Binaries, controlURL string) {
 		}
 	}
 
-	run(t, dir, bins.CLI,
+	run(t, dir, h.cli,
 		"--socket="+filepath.Join(dir, "sock"),
 		"up",
 		"--login-server="+controlURL,
@@ -17,7 +17,6 @@
 	"testing"
 	"text/template"
 
-	"tailscale.com/tstest/integration"
 	"tailscale.com/types/logger"
 )
 
@@ -153,15 +152,15 @@
 systemd.services.tailscaled.environment."TS_LOG_TARGET" = "{{.LogTarget}}";
 }`
 
-func copyUnit(t *testing.T, bins *integration.Binaries) {
+func (h *Harness) copyUnit(t *testing.T) {
 	t.Helper()
 
 	data, err := os.ReadFile("../../../cmd/tailscaled/tailscaled.service")
 	if err != nil {
 		t.Fatal(err)
 	}
-	os.MkdirAll(filepath.Join(bins.Dir, "systemd"), 0755)
-	err = os.WriteFile(filepath.Join(bins.Dir, "systemd", "tailscaled.service"), data, 0666)
+	os.MkdirAll(filepath.Join(h.binaryDir, "systemd"), 0755)
+	err = os.WriteFile(filepath.Join(h.binaryDir, "systemd", "tailscaled.service"), data, 0666)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -172,7 +171,7 @@ func (h *Harness) makeNixOSImage(t *testing.T, d Distro, cdir string) string {
 		t.Skip("https://github.com/NixOS/nixpkgs/issues/131098")
 	}
 
-	copyUnit(t, h.bins)
+	h.copyUnit(t)
 	dir := t.TempDir()
 	fname := filepath.Join(dir, d.Name+".nix")
 	fout, err := os.Create(fname)
@@ -185,7 +184,7 @@ func (h *Harness) makeNixOSImage(t *testing.T, d Distro, cdir string) string {
 		BinPath   string
 		LogTarget string
 	}{
-		BinPath:   h.bins.Dir,
+		BinPath:   h.binaryDir,
 		LogTarget: h.loginServerURL,
 	})
 	if err != nil {
@@ -290,7 +290,6 @@ func checkCachedImageHash(t *testing.T, d Distro, cacheDir string) string {
 }
 
 func (h *Harness) copyBinaries(t *testing.T, d Distro, conn *ssh.Client) {
-	bins := h.bins
 	if strings.HasPrefix(d.Name, "nixos") {
 		return
 	}
@@ -305,8 +304,8 @@ func (h *Harness) copyBinaries(t *testing.T, d Distro, conn *ssh.Client) {
 	mkdir(t, cli, "/etc/default")
 	mkdir(t, cli, "/var/lib/tailscale")
 
-	copyFile(t, cli, bins.Daemon, "/usr/sbin/tailscaled")
-	copyFile(t, cli, bins.CLI, "/usr/bin/tailscale")
+	copyFile(t, cli, h.daemon, "/usr/sbin/tailscaled")
+	copyFile(t, cli, h.cli, "/usr/bin/tailscale")
 
 	// TODO(Xe): revisit this assumption before it breaks the test.
 	copyFile(t, cli, "../../../cmd/tailscaled/tailscaled.defaults", "/etc/default/tailscaled")
@@ -30,6 +30,7 @@
 	"golang.org/x/sync/semaphore"
 	"inet.af/netaddr"
 	"tailscale.com/tstest"
+	"tailscale.com/tstest/integration"
 	"tailscale.com/types/logger"
 )
 
@@ -52,6 +53,13 @@
 	}()
 )
 
+func TestMain(m *testing.M) {
+	flag.Parse()
+	v := m.Run()
+	integration.CleanupBinaries()
+	os.Exit(v)
+}
+
 func TestDownloadImages(t *testing.T) {
 	if !*runVMTests {
 		t.Skip("not running integration tests (need --run-vm-tests)")