2021-04-29 21:44:08 +00:00
|
|
|
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package integration
|
|
|
|
|
2021-07-07 19:01:57 +00:00
|
|
|
//go:generate go run gen_deps.go
|
|
|
|
|
2021-04-29 21:44:08 +00:00
|
|
|
import (
|
|
|
|
"bytes"
|
2021-06-10 22:12:25 +00:00
|
|
|
"context"
|
2021-04-29 21:44:08 +00:00
|
|
|
"encoding/json"
|
2021-05-12 21:43:43 +00:00
|
|
|
"errors"
|
2021-05-12 04:57:25 +00:00
|
|
|
"flag"
|
2021-04-29 21:44:08 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"log"
|
|
|
|
"net/http"
|
|
|
|
"net/http/httptest"
|
|
|
|
"os"
|
|
|
|
"os/exec"
|
|
|
|
"path/filepath"
|
2021-05-12 04:57:25 +00:00
|
|
|
"regexp"
|
2021-04-29 21:44:08 +00:00
|
|
|
"runtime"
|
2021-07-09 15:51:30 +00:00
|
|
|
"strings"
|
2021-04-29 21:44:08 +00:00
|
|
|
"sync"
|
2021-05-03 17:49:45 +00:00
|
|
|
"sync/atomic"
|
2021-04-29 21:44:08 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"go4.org/mem"
|
2021-07-09 15:51:30 +00:00
|
|
|
"inet.af/netaddr"
|
2021-07-13 21:03:05 +00:00
|
|
|
"tailscale.com/ipn"
|
2021-05-03 21:22:18 +00:00
|
|
|
"tailscale.com/ipn/ipnstate"
|
2021-05-03 17:49:45 +00:00
|
|
|
"tailscale.com/safesocket"
|
|
|
|
"tailscale.com/tailcfg"
|
2021-04-29 21:44:08 +00:00
|
|
|
"tailscale.com/tstest"
|
2021-04-30 04:52:31 +00:00
|
|
|
"tailscale.com/tstest/integration/testcontrol"
|
2021-05-03 17:49:45 +00:00
|
|
|
"tailscale.com/types/logger"
|
2021-04-29 21:44:08 +00:00
|
|
|
)
|
|
|
|
|
2021-06-10 18:23:53 +00:00
|
|
|
// Command-line flags controlling how chatty the spawned binaries are.
var (
	verboseTailscaled = flag.Bool("verbose-tailscaled", false, "verbose tailscaled logging")
	verboseTailscale  = flag.Bool("verbose-tailscale", false, "verbose tailscale CLI logging")
)
|
2021-05-12 04:57:25 +00:00
|
|
|
|
2021-05-03 17:49:45 +00:00
|
|
|
// mainError holds an error (stored as atomic.Value of error) that, if set
// by the time the tests finish, causes TestMain to fail the whole run.
// Presumably set elsewhere in this package; its writers are not visible here.
var mainError atomic.Value // of error
|
|
|
|
|
|
|
|
// TestMain runs the test suite, cleans up built binaries afterwards, and
// fails the process if any test failed or if mainError was set.
func TestMain(m *testing.M) {
	// Have to disable UPnP which hits the network, otherwise it fails due to HTTP proxy.
	os.Setenv("TS_DISABLE_UPNP", "true")
	flag.Parse()
	v := m.Run()
	CleanupBinaries()
	// Propagate the test run's own exit code first; only if the tests
	// themselves passed do we then check the package-level mainError.
	if v != 0 {
		os.Exit(v)
	}
	if err, ok := mainError.Load().(error); ok {
		fmt.Fprintf(os.Stderr, "FAIL: %v\n", err)
		os.Exit(1)
	}
	os.Exit(0)
}
|
|
|
|
|
2021-12-03 22:28:36 +00:00
|
|
|
// TestOneNodeUpNoAuth brings up a single node against a control server that
// requires no authentication and verifies it reaches the Running state and
// shuts down cleanly.
func TestOneNodeUpNoAuth(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)

	d1 := n1.StartDaemon()
	n1.AwaitResponding()
	n1.MustUp()

	t.Logf("Got IP: %v", n1.AwaitIP())
	n1.AwaitRunning()

	d1.MustCleanShutdown(t)

	t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests())
}
|
|
|
|
|
2021-10-29 00:44:18 +00:00
|
|
|
// TestOneNodeExpiredKey brings up a node, force-expires its key via the test
// control server, verifies the node falls back to NeedsLogin, then un-expires
// it and verifies the node returns to Running.
func TestOneNodeExpiredKey(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)

	d1 := n1.StartDaemon()
	n1.AwaitResponding()
	n1.MustUp()
	n1.AwaitRunning()

	nodes := env.Control.AllNodes()
	if len(nodes) != 1 {
		t.Fatalf("expected 1 node, got %d nodes", len(nodes))
	}

	nodeKey := nodes[0].Key
	// Wait until the node has an active map request before expiring keys,
	// so we know the node will observe the change.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	if err := env.Control.AwaitNodeInMapRequest(ctx, nodeKey); err != nil {
		t.Fatal(err)
	}
	cancel()

	env.Control.SetExpireAllNodes(true)
	n1.AwaitNeedsLogin()
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	if err := env.Control.AwaitNodeInMapRequest(ctx, nodeKey); err != nil {
		t.Fatal(err)
	}
	cancel()

	env.Control.SetExpireAllNodes(false)
	n1.AwaitRunning()

	d1.MustCleanShutdown(t)
}
|
|
|
|
|
2021-07-28 22:17:31 +00:00
|
|
|
// TestCollectPanic runs tailscaled --cleanup with TS_PLEASE_PANIC set so it
// crashes, then runs it again and verifies the second run uploads the
// recorded panic to the log catcher.
func TestCollectPanic(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n := newTestNode(t, env)

	cmd := exec.Command(env.daemon, "--cleanup")
	cmd.Env = append(os.Environ(),
		"TS_PLEASE_PANIC=1",
		"TS_LOG_TARGET="+n.env.LogCatcherServer.URL,
	)
	got, _ := cmd.CombinedOutput() // we expect it to fail, ignore err
	t.Logf("initial run: %s", got)

	// Now we run it again, and on start, it will upload the logs to logcatcher.
	cmd = exec.Command(env.daemon, "--cleanup")
	cmd.Env = append(os.Environ(), "TS_LOG_TARGET="+n.env.LogCatcherServer.URL)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("cleanup failed: %v: %q", err, out)
	}
	// Poll until the panic text shows up in the captured logs.
	if err := tstest.WaitFor(20*time.Second, func() error {
		const sub = `panic`
		if !n.env.LogCatcher.logsContains(mem.S(sub)) {
			return fmt.Errorf("log catcher didn't see %#q; got %s", sub, n.env.LogCatcher.logsString())
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
|
|
|
|
|
2022-02-17 02:36:04 +00:00
|
|
|
// TestControlTimeLogLine brings a node up and verifies tailscaled logs the
// control server's advertised time (the test control server's fixed time).
func TestControlTimeLogLine(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n := newTestNode(t, env)

	n.StartDaemon()
	n.AwaitResponding()
	n.MustUp()
	n.AwaitRunning()

	if err := tstest.WaitFor(20*time.Second, func() error {
		const sub = `netmap: control time is 2020-08-03T00:00:00.000000001Z`
		if !n.env.LogCatcher.logsContains(mem.S(sub)) {
			return fmt.Errorf("log catcher didn't see %#q; got %s", sub, n.env.LogCatcher.logsString())
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
|
|
|
|
|
2021-07-13 21:03:05 +00:00
|
|
|
// test Issue 2321: Start with UpdatePrefs should save prefs to disk
func TestStateSavedOnStart(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)

	d1 := n1.StartDaemon()
	n1.AwaitResponding()
	n1.MustUp()

	t.Logf("Got IP: %v", n1.AwaitIP())
	n1.AwaitRunning()

	// Snapshot the on-disk prefs before making any change.
	p1 := n1.diskPrefs()
	t.Logf("Prefs1: %v", p1.Pretty())

	// Bring it down, to prevent an EditPrefs call in the
	// subsequent "up", as we want to test the bug when
	// cmd/tailscale implements "up" via LocalBackend.Start.
	n1.MustDown()

	// And change the hostname to something:
	if err := n1.Tailscale("up", "--login-server="+n1.env.ControlServer.URL, "--hostname=foo").Run(); err != nil {
		t.Fatalf("up: %v", err)
	}

	// The prefs on disk must reflect the hostname change.
	p2 := n1.diskPrefs()
	if pretty := p1.Pretty(); pretty == p2.Pretty() {
		t.Errorf("Prefs didn't change on disk after 'up', still: %s", pretty)
	}
	if p2.Hostname != "foo" {
		t.Errorf("Prefs.Hostname = %q; want foo", p2.Hostname)
	}

	d1.MustCleanShutdown(t)
}
|
|
|
|
|
2021-12-03 22:28:36 +00:00
|
|
|
// TestOneNodeUpAuth brings a node up against a control server that requires
// interactive auth, completes exactly one auth URL via the control server's
// test hook, and verifies the node reaches Running.
func TestOneNodeUpAuth(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t, configureControl(func(control *testcontrol.Server) {
		control.RequireAuth = true
	}))

	n1 := newTestNode(t, env)
	d1 := n1.StartDaemon()

	n1.AwaitListening()

	st := n1.MustStatus()
	t.Logf("Status: %s", st.BackendState)

	t.Logf("Running up --login-server=%s ...", env.ControlServer.URL)

	cmd := n1.Tailscale("up", "--login-server="+env.ControlServer.URL)
	var authCountAtomic int32
	// Parse the auth URL the CLI prints and complete it against the
	// test control server, counting how many times we do so.
	cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error {
		if env.Control.CompleteAuth(urlStr) {
			atomic.AddInt32(&authCountAtomic, 1)
			t.Logf("completed auth path %s", urlStr)
			return nil
		}
		err := fmt.Errorf("Failed to complete auth path to %q", urlStr)
		t.Log(err)
		return err
	}}
	cmd.Stderr = cmd.Stdout
	if err := cmd.Run(); err != nil {
		t.Fatalf("up: %v", err)
	}
	t.Logf("Got IP: %v", n1.AwaitIP())

	n1.AwaitRunning()

	if n := atomic.LoadInt32(&authCountAtomic); n != 1 {
		t.Errorf("Auth URLs completed = %d; want 1", n)
	}

	d1.MustCleanShutdown(t)
}
|
|
|
|
|
2021-05-12 21:43:43 +00:00
|
|
|
func TestTwoNodes(t *testing.T) {
|
|
|
|
t.Parallel()
|
2021-07-20 20:55:09 +00:00
|
|
|
env := newTestEnv(t)
|
2021-05-12 21:43:43 +00:00
|
|
|
|
|
|
|
// Create two nodes:
|
|
|
|
n1 := newTestNode(t, env)
|
2021-06-28 16:33:42 +00:00
|
|
|
n1SocksAddrCh := n1.socks5AddrChan()
|
2021-12-16 01:05:21 +00:00
|
|
|
d1 := n1.StartDaemon()
|
2021-05-12 21:43:43 +00:00
|
|
|
|
|
|
|
n2 := newTestNode(t, env)
|
2021-06-28 16:33:42 +00:00
|
|
|
n2SocksAddrCh := n2.socks5AddrChan()
|
2021-12-16 01:05:21 +00:00
|
|
|
d2 := n2.StartDaemon()
|
2021-05-12 21:43:43 +00:00
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
n1Socks := n1.AwaitSocksAddr(n1SocksAddrCh)
|
|
|
|
n2Socks := n1.AwaitSocksAddr(n2SocksAddrCh)
|
2021-06-28 16:33:42 +00:00
|
|
|
t.Logf("node1 SOCKS5 addr: %v", n1Socks)
|
|
|
|
t.Logf("node2 SOCKS5 addr: %v", n2Socks)
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
n1.AwaitListening()
|
|
|
|
n2.AwaitListening()
|
2021-05-12 21:43:43 +00:00
|
|
|
n1.MustUp()
|
|
|
|
n2.MustUp()
|
2021-12-16 01:05:21 +00:00
|
|
|
n1.AwaitRunning()
|
|
|
|
n2.AwaitRunning()
|
2021-05-12 21:43:43 +00:00
|
|
|
|
|
|
|
if err := tstest.WaitFor(2*time.Second, func() error {
|
2021-12-16 01:05:21 +00:00
|
|
|
st := n1.MustStatus()
|
2021-05-12 21:43:43 +00:00
|
|
|
if len(st.Peer) == 0 {
|
|
|
|
return errors.New("no peers")
|
|
|
|
}
|
|
|
|
if len(st.Peer) > 1 {
|
|
|
|
return fmt.Errorf("got %d peers; want 1", len(st.Peer))
|
|
|
|
}
|
|
|
|
peer := st.Peer[st.Peers()[0]]
|
|
|
|
if peer.ID == st.Self.ID {
|
|
|
|
return errors.New("peer is self")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
d1.MustCleanShutdown(t)
|
|
|
|
d2.MustCleanShutdown(t)
|
|
|
|
}
|
|
|
|
|
2021-05-18 20:20:29 +00:00
|
|
|
// TestNodeAddressIPFields verifies that after a node comes up, the control
// server's record for it has non-empty Addresses and AllowedIPs fields.
func TestNodeAddressIPFields(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)
	d1 := n1.StartDaemon()

	n1.AwaitListening()
	n1.MustUp()
	n1.AwaitRunning()

	testNodes := env.Control.AllNodes()

	if len(testNodes) != 1 {
		t.Errorf("Expected %d nodes, got %d", 1, len(testNodes))
	}
	node := testNodes[0]
	if len(node.Addresses) == 0 {
		t.Errorf("Empty Addresses field in node")
	}
	if len(node.AllowedIPs) == 0 {
		t.Errorf("Empty AllowedIPs field in node")
	}

	d1.MustCleanShutdown(t)
}
|
|
|
|
|
2021-06-02 15:23:24 +00:00
|
|
|
func TestAddPingRequest(t *testing.T) {
|
|
|
|
t.Parallel()
|
2021-07-20 20:55:09 +00:00
|
|
|
env := newTestEnv(t)
|
2021-06-02 15:23:24 +00:00
|
|
|
n1 := newTestNode(t, env)
|
2021-12-16 01:05:21 +00:00
|
|
|
n1.StartDaemon()
|
2021-06-02 15:23:24 +00:00
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
n1.AwaitListening()
|
2021-06-02 15:23:24 +00:00
|
|
|
n1.MustUp()
|
2021-12-16 01:05:21 +00:00
|
|
|
n1.AwaitRunning()
|
2021-06-02 15:23:24 +00:00
|
|
|
|
|
|
|
gotPing := make(chan bool, 1)
|
|
|
|
waitPing := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
gotPing <- true
|
|
|
|
}))
|
|
|
|
defer waitPing.Close()
|
|
|
|
|
|
|
|
nodes := env.Control.AllNodes()
|
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("expected 1 node, got %d nodes", len(nodes))
|
|
|
|
}
|
|
|
|
|
2021-11-02 03:55:52 +00:00
|
|
|
nodeKey := nodes[0].Key
|
2021-06-15 19:41:06 +00:00
|
|
|
|
|
|
|
// Check that we get at least one ping reply after 10 tries.
|
|
|
|
for try := 1; try <= 10; try++ {
|
|
|
|
t.Logf("ping %v ...", try)
|
2021-06-10 22:12:25 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
|
|
if err := env.Control.AwaitNodeInMapRequest(ctx, nodeKey); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
cancel()
|
2021-06-15 19:41:06 +00:00
|
|
|
|
|
|
|
pr := &tailcfg.PingRequest{URL: fmt.Sprintf("%s/ping-%d", waitPing.URL, try), Log: true}
|
|
|
|
if !env.Control.AddPingRequest(nodeKey, pr) {
|
|
|
|
t.Logf("failed to AddPingRequest")
|
|
|
|
continue
|
2021-06-10 22:12:25 +00:00
|
|
|
}
|
2021-06-02 15:23:24 +00:00
|
|
|
|
2021-06-10 22:12:25 +00:00
|
|
|
// Wait for PingRequest to come back
|
|
|
|
pingTimeout := time.NewTimer(2 * time.Second)
|
2021-06-15 19:41:06 +00:00
|
|
|
defer pingTimeout.Stop()
|
2021-06-10 22:12:25 +00:00
|
|
|
select {
|
|
|
|
case <-gotPing:
|
2021-06-15 19:41:06 +00:00
|
|
|
t.Logf("got ping; success")
|
|
|
|
return
|
2021-06-10 22:12:25 +00:00
|
|
|
case <-pingTimeout.C:
|
2021-06-15 19:41:06 +00:00
|
|
|
// Try again.
|
2021-06-10 22:12:25 +00:00
|
|
|
}
|
2021-06-02 15:23:24 +00:00
|
|
|
}
|
2021-06-15 19:41:06 +00:00
|
|
|
t.Error("all ping attempts failed")
|
2021-06-02 15:23:24 +00:00
|
|
|
}
|
|
|
|
|
2021-07-16 22:21:00 +00:00
|
|
|
// Issue 2434: when "down" (WantRunning false), tailscaled shouldn't
// be connected to control.
func TestNoControlConnWhenDown(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)

	d1 := n1.StartDaemon()
	n1.AwaitResponding()

	// Come up the first time.
	n1.MustUp()
	ip1 := n1.AwaitIP()
	n1.AwaitRunning()

	// Then bring it down and stop the daemon.
	n1.MustDown()
	d1.MustCleanShutdown(t)

	// Restart the daemon with fresh logs; state on disk should keep it
	// in the Stopped state (not reconnecting to control).
	env.LogCatcher.Reset()
	d2 := n1.StartDaemon()
	n1.AwaitResponding()

	st := n1.MustStatus()
	if got, want := st.BackendState, "Stopped"; got != want {
		t.Fatalf("after restart, state = %q; want %q", got, want)
	}

	// The node's IP should survive the restart.
	ip2 := n1.AwaitIP()
	if ip1 != ip2 {
		t.Errorf("IPs different: %q vs %q", ip1, ip2)
	}

	// The real test: verify our daemon doesn't have an HTTP request open.
	if n := env.Control.InServeMap(); n != 0 {
		t.Errorf("in serve map = %d; want 0", n)
	}

	d2.MustCleanShutdown(t)
}
|
|
|
|
|
2021-07-19 18:07:42 +00:00
|
|
|
// Issue 2137: make sure Windows tailscaled works with the CLI alone,
// without the GUI to kick off a Start.
func TestOneNodeUpWindowsStyle(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	n1 := newTestNode(t, env)
	// Make the CLI behave as if running on Windows.
	n1.upFlagGOOS = "windows"

	// Run tailscaled with Windows IPN semantics.
	d1 := n1.StartDaemonAsIPNGOOS("windows")
	n1.AwaitResponding()
	n1.MustUp("--unattended")

	t.Logf("Got IP: %v", n1.AwaitIP())
	n1.AwaitRunning()

	d1.MustCleanShutdown(t)
}
|
|
|
|
|
2021-12-15 23:55:02 +00:00
|
|
|
// TestLogoutRemovesAllPeers verifies that logging a node out removes all of
// its peers from its status, and that logging back in restores them.
func TestLogoutRemovesAllPeers(t *testing.T) {
	t.Parallel()
	env := newTestEnv(t)
	// Spin up some nodes.
	nodes := make([]*testNode, 2)
	for i := range nodes {
		nodes[i] = newTestNode(t, env)
		nodes[i].StartDaemon()
		nodes[i].AwaitResponding()
		nodes[i].MustUp()
		nodes[i].AwaitIP()
		nodes[i].AwaitRunning()
	}

	// Make every node ping every other node.
	// This makes sure magicsock is fully populated.
	for i := range nodes {
		for j := range nodes {
			if i <= j {
				continue
			}
			if err := tstest.WaitFor(20*time.Second, func() error {
				return nodes[i].Ping(nodes[j])
			}); err != nil {
				t.Fatalf("ping %v -> %v: %v", nodes[i].AwaitIP(), nodes[j].AwaitIP(), err)
			}
		}
	}

	// wantNode0PeerCount waits until node[0] status includes exactly want peers.
	wantNode0PeerCount := func(want int) {
		if err := tstest.WaitFor(20*time.Second, func() error {
			s := nodes[0].MustStatus()
			if peers := s.Peers(); len(peers) != want {
				return fmt.Errorf("want %d peer(s) in status, got %v", want, peers)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	wantNode0PeerCount(len(nodes) - 1) // all other nodes are peers
	nodes[0].MustLogOut()
	wantNode0PeerCount(0) // node[0] is logged out, so it should not have any peers
	nodes[0].MustUp()
	nodes[0].AwaitIP()
	wantNode0PeerCount(len(nodes) - 1) // all other nodes are peers again
}
|
|
|
|
|
2021-05-01 03:27:05 +00:00
|
|
|
// testEnv contains the test environment (set of servers) used by one
// or more nodes.
type testEnv struct {
	t      testing.TB
	cli    string // path to the built tailscale CLI binary
	daemon string // path to the built tailscaled binary

	// LogCatcher receives the log uploads tailscaled would normally
	// send to the log service.
	LogCatcher       *LogCatcher
	LogCatcherServer *httptest.Server

	// Control is the in-process test control server nodes register with.
	Control       *testcontrol.Server
	ControlServer *httptest.Server

	// TrafficTrap is installed as the nodes' HTTP(S) proxy to detect
	// any unexpected outbound network traffic.
	TrafficTrap       *trafficTrap
	TrafficTrapServer *httptest.Server
}
|
|
|
|
|
2021-06-22 17:24:05 +00:00
|
|
|
// testEnvOpt is an option that customizes a testEnv during newTestEnv.
type testEnvOpt interface {
	modifyTestEnv(*testEnv)
}

// configureControl is a testEnvOpt that mutates the test control server
// before it starts serving.
type configureControl func(*testcontrol.Server)

func (f configureControl) modifyTestEnv(te *testEnv) {
	f(te.Control)
}
|
|
|
|
|
2021-12-01 18:24:25 +00:00
|
|
|
// newTestEnv starts a bunch of services and returns a new test environment.
// newTestEnv arranges for the environment's resources to be cleaned up on exit.
func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv {
	if runtime.GOOS == "windows" {
		t.Skip("not tested/working on Windows yet")
	}
	derpMap := RunDERPAndSTUN(t, logger.Discard, "127.0.0.1")
	logc := new(LogCatcher)
	control := &testcontrol.Server{
		DERPMap: derpMap,
	}
	// Unstarted so opts can still configure control before it serves.
	control.HTTPTestServer = httptest.NewUnstartedServer(control)
	trafficTrap := new(trafficTrap)
	e := &testEnv{
		t:                 t,
		cli:               TailscaleBinary(t),
		daemon:            TailscaledBinary(t),
		LogCatcher:        logc,
		LogCatcherServer:  httptest.NewServer(logc),
		Control:           control,
		ControlServer:     control.HTTPTestServer,
		TrafficTrap:       trafficTrap,
		TrafficTrapServer: httptest.NewServer(trafficTrap),
	}
	for _, o := range opts {
		o.modifyTestEnv(e)
	}
	control.HTTPTestServer.Start()
	t.Cleanup(func() {
		// Shut down e. Any traffic that hit the trap is a test failure.
		if err := e.TrafficTrap.Err(); err != nil {
			e.t.Errorf("traffic trap: %v", err)
			e.t.Logf("logs: %s", e.LogCatcher.logsString())
		}
		e.LogCatcherServer.Close()
		e.TrafficTrapServer.Close()
		e.ControlServer.Close()
	})
	return e
}
|
|
|
|
|
|
|
|
// testNode is a machine with a tailscale & tailscaled.
// Currently, the test is simplistic and user==node==machine.
// That may grow complexity later to test more.
type testNode struct {
	env *testEnv

	dir        string // temp dir for sock & state
	sockFile   string // path of the tailscaled IPC socket
	stateFile  string // path of the tailscaled state file
	upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI

	// mu guards onLogLine; hooks are invoked from the daemon's output parser.
	mu        sync.Mutex
	onLogLine []func([]byte)
}
|
|
|
|
|
|
|
|
// newTestNode allocates a temp directory for a new test node.
// The node is not started automatically.
func newTestNode(t *testing.T, env *testEnv) *testNode {
	dir := t.TempDir()
	sockFile := filepath.Join(dir, "tailscale.sock")
	// Unix socket paths have a small OS limit on sun_path length
	// (104 bytes on some platforms); fail fast with a clear message.
	if len(sockFile) >= 104 {
		t.Fatalf("sockFile path %q (len %v) is too long, must be < 104", sockFile, len(sockFile))
	}
	return &testNode{
		env:       env,
		dir:       dir,
		sockFile:  sockFile,
		stateFile: filepath.Join(dir, "tailscale.state"),
	}
}
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
// diskPrefs reads the node's prefs from its on-disk state file,
// failing the test on any error.
func (n *testNode) diskPrefs() *ipn.Prefs {
	t := n.env.t
	t.Helper()
	// Quick existence/readability check before parsing as a store.
	if _, err := ioutil.ReadFile(n.stateFile); err != nil {
		t.Fatalf("reading prefs: %v", err)
	}
	fs, err := ipn.NewFileStore(n.stateFile)
	if err != nil {
		t.Fatalf("reading prefs, NewFileStore: %v", err)
	}
	prefBytes, err := fs.ReadState(ipn.GlobalDaemonStateKey)
	if err != nil {
		t.Fatalf("reading prefs, ReadState: %v", err)
	}
	p := new(ipn.Prefs)
	if err := json.Unmarshal(prefBytes, p); err != nil {
		t.Fatalf("reading prefs, JSON unmarshal: %v", err)
	}
	return p
}
|
|
|
|
|
2021-07-16 22:21:00 +00:00
|
|
|
// AwaitResponding waits for n's tailscaled to be up enough to be
// responding, but doesn't wait for any particular state.
func (n *testNode) AwaitResponding() {
	t := n.env.t
	t.Helper()
	n.AwaitListening()

	st := n.MustStatus()
	t.Logf("Status: %s", st.BackendState)

	// Also wait until the daemon's startup line has reached the
	// log catcher, proving end-to-end log upload works.
	if err := tstest.WaitFor(20*time.Second, func() error {
		const sub = `Program starting: `
		if !n.env.LogCatcher.logsContains(mem.S(sub)) {
			return fmt.Errorf("log catcher didn't see %#q; got %s", sub, n.env.LogCatcher.logsString())
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
|
|
|
|
|
2021-06-28 16:33:42 +00:00
|
|
|
// addLogLineHook registers a hook f to be called on each tailscaled
|
|
|
|
// log line output.
|
|
|
|
func (n *testNode) addLogLineHook(f func([]byte)) {
|
|
|
|
n.mu.Lock()
|
|
|
|
defer n.mu.Unlock()
|
|
|
|
n.onLogLine = append(n.onLogLine, f)
|
|
|
|
}
|
|
|
|
|
|
|
|
// socks5AddrChan returns a channel that receives the address (e.g. "localhost:23874")
|
|
|
|
// of the node's SOCKS5 listener, once started.
|
|
|
|
func (n *testNode) socks5AddrChan() <-chan string {
|
|
|
|
ch := make(chan string, 1)
|
|
|
|
n.addLogLineHook(func(line []byte) {
|
|
|
|
const sub = "SOCKS5 listening on "
|
|
|
|
i := mem.Index(mem.B(line), mem.S(sub))
|
|
|
|
if i == -1 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
addr := string(line)[i+len(sub):]
|
|
|
|
select {
|
|
|
|
case ch <- addr:
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
})
|
|
|
|
return ch
|
|
|
|
}
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitSocksAddr(ch <-chan string) string {
|
|
|
|
t := n.env.t
|
2021-06-28 16:33:42 +00:00
|
|
|
t.Helper()
|
|
|
|
timer := time.NewTimer(10 * time.Second)
|
|
|
|
defer timer.Stop()
|
|
|
|
select {
|
|
|
|
case v := <-ch:
|
|
|
|
return v
|
|
|
|
case <-timer.C:
|
|
|
|
t.Fatal("timeout waiting for node to log its SOCK5 listening address")
|
|
|
|
panic("unreachable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// nodeOutputParser parses stderr of tailscaled processes, calling the
// per-line callbacks previously registered via
// testNode.addLogLineHook.
type nodeOutputParser struct {
	buf bytes.Buffer // accumulates partial output until a newline arrives
	n   *testNode
}
|
|
|
|
|
|
|
|
func (op *nodeOutputParser) Write(p []byte) (n int, err error) {
|
|
|
|
n, err = op.buf.Write(p)
|
|
|
|
op.parseLines()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (op *nodeOutputParser) parseLines() {
|
|
|
|
n := op.n
|
|
|
|
buf := op.buf.Bytes()
|
|
|
|
for len(buf) > 0 {
|
|
|
|
nl := bytes.IndexByte(buf, '\n')
|
|
|
|
if nl == -1 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
line := buf[:nl+1]
|
|
|
|
buf = buf[nl+1:]
|
|
|
|
lineTrim := bytes.TrimSpace(line)
|
|
|
|
|
|
|
|
n.mu.Lock()
|
|
|
|
for _, f := range n.onLogLine {
|
|
|
|
f(lineTrim)
|
|
|
|
}
|
|
|
|
n.mu.Unlock()
|
|
|
|
}
|
|
|
|
if len(buf) == 0 {
|
|
|
|
op.buf.Reset()
|
|
|
|
} else {
|
|
|
|
io.CopyN(ioutil.Discard, &op.buf, int64(op.buf.Len()-len(buf)))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-12 04:57:25 +00:00
|
|
|
// Daemon is a handle to a running tailscaled process started by a testNode.
type Daemon struct {
	Process *os.Process
}
|
|
|
|
|
|
|
|
// MustCleanShutdown sends the daemon an interrupt and requires it to
// exit with status 0, failing t otherwise.
func (d *Daemon) MustCleanShutdown(t testing.TB) {
	// Signal error deliberately ignored; Wait below reports the outcome.
	d.Process.Signal(os.Interrupt)
	ps, err := d.Process.Wait()
	if err != nil {
		t.Fatalf("tailscaled Wait: %v", err)
	}
	if ps.ExitCode() != 0 {
		t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode())
	}
}
|
|
|
|
|
2021-12-01 18:24:25 +00:00
|
|
|
// StartDaemon starts the node's tailscaled, failing if it fails to start.
// StartDaemon ensures that the process will exit when the test completes.
func (n *testNode) StartDaemon() *Daemon {
	return n.StartDaemonAsIPNGOOS(runtime.GOOS)
}
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
// StartDaemonAsIPNGOOS starts tailscaled pretending (via
// TS_DEBUG_TAILSCALED_IPN_GOOS) to run on the given OS, wiring its logs
// to the log catcher and its proxy env to the traffic trap.
// The process is killed when the test completes.
func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon {
	t := n.env.t
	cmd := exec.Command(n.env.daemon,
		"--tun=userspace-networking",
		"--state="+n.stateFile,
		"--socket="+n.sockFile,
		"--socks5-server=localhost:0",
	)
	if *verboseTailscaled {
		cmd.Args = append(cmd.Args, "-verbose=2")
	}
	cmd.Env = append(os.Environ(),
		"TS_LOG_TARGET="+n.env.LogCatcherServer.URL,
		// Any proxied traffic is a test failure (see trafficTrap).
		"HTTP_PROXY="+n.env.TrafficTrapServer.URL,
		"HTTPS_PROXY="+n.env.TrafficTrapServer.URL,
		"TS_DEBUG_TAILSCALED_IPN_GOOS="+ipnGOOS,
		"TS_LOGS_DIR="+t.TempDir(),
	)
	// Stderr is parsed line-by-line so tests can hook log output.
	cmd.Stderr = &nodeOutputParser{n: n}
	if *verboseTailscaled {
		cmd.Stdout = os.Stdout
		cmd.Stderr = io.MultiWriter(cmd.Stderr, os.Stderr)
	}
	if err := cmd.Start(); err != nil {
		t.Fatalf("starting tailscaled: %v", err)
	}
	t.Cleanup(func() { cmd.Process.Kill() })
	return &Daemon{
		Process: cmd.Process,
	}
}
|
|
|
|
|
2021-07-19 18:07:42 +00:00
|
|
|
// MustUp runs "tailscale up" against the test control server with any
// extraArgs appended, failing the test if the command fails.
func (n *testNode) MustUp(extraArgs ...string) {
	t := n.env.t
	args := []string{
		"up",
		"--login-server=" + n.env.ControlServer.URL,
	}
	args = append(args, extraArgs...)
	cmd := n.Tailscale(args...)
	t.Logf("Running %v ...", cmd)
	cmd.Stdout = nil // in case --verbose-tailscale was set
	cmd.Stderr = nil // in case --verbose-tailscale was set
	if b, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("up: %v, %v", string(b), err)
	}
}
|
|
|
|
|
2021-07-13 21:03:05 +00:00
|
|
|
func (n *testNode) MustDown() {
|
|
|
|
t := n.env.t
|
|
|
|
t.Logf("Running down ...")
|
|
|
|
if err := n.Tailscale("down").Run(); err != nil {
|
|
|
|
t.Fatalf("down: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-15 23:55:02 +00:00
|
|
|
func (n *testNode) MustLogOut() {
|
|
|
|
t := n.env.t
|
|
|
|
t.Logf("Running logout ...")
|
|
|
|
if err := n.Tailscale("logout").Run(); err != nil {
|
|
|
|
t.Fatalf("logout: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ping runs "tailscale ping" from n to otherNode's Tailscale IP,
// returning the command's error, if any.
func (n *testNode) Ping(otherNode *testNode) error {
	t := n.env.t
	ip := otherNode.AwaitIP().String()
	t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP())
	return n.Tailscale("ping", ip).Run()
}
|
|
|
|
|
2021-05-03 17:49:45 +00:00
|
|
|
// AwaitListening waits for the tailscaled to be serving local clients
|
|
|
|
// over its localhost IPC mechanism. (Unix socket, etc)
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitListening() {
|
|
|
|
t := n.env.t
|
safesocket: add ConnectionStrategy, provide control over fallbacks
fee2d9fad added support for cmd/tailscale to connect to IPNExtension.
It came in two parts: If no socket was provided, dial IPNExtension first,
and also, if dialing the socket failed, fall back to IPNExtension.
The second half of that support caused the integration tests to fail
when run on a machine that was also running IPNExtension.
The integration tests want to wait until the tailscaled instances
that they spun up are listening. They do that by dialing the new
instance. But when that dial failed, it was falling back to IPNExtension,
so it appeared (incorrectly) that tailscaled was running.
Hilarity predictably ensued.
If a user (or a test) explicitly provides a socket to dial,
it is a reasonable assumption that they have a specific tailscaled
in mind and don't want to fall back to IPNExtension.
It is certainly true of the integration tests.
Instead of adding a bool to Connect, split out the notion of a
connection strategy. For now, the implementation remains the same,
but with the details hidden a bit. Later, we can improve that.
Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
2021-12-08 21:55:55 +00:00
|
|
|
s := safesocket.DefaultConnectionStrategy(n.sockFile)
|
|
|
|
s.UseFallback(false) // connect only to the tailscaled that we started
|
2021-05-03 17:49:45 +00:00
|
|
|
if err := tstest.WaitFor(20*time.Second, func() (err error) {
|
safesocket: add ConnectionStrategy, provide control over fallbacks
fee2d9fad added support for cmd/tailscale to connect to IPNExtension.
It came in two parts: If no socket was provided, dial IPNExtension first,
and also, if dialing the socket failed, fall back to IPNExtension.
The second half of that support caused the integration tests to fail
when run on a machine that was also running IPNExtension.
The integration tests want to wait until the tailscaled instances
that they spun up are listening. They do that by dialing the new
instance. But when that dial failed, it was falling back to IPNExtension,
so it appeared (incorrectly) that tailscaled was running.
Hilarity predictably ensued.
If a user (or a test) explicitly provides a socket to dial,
it is a reasonable assumption that they have a specific tailscaled
in mind and don't want to fall back to IPNExtension.
It is certainly true of the integration tests.
Instead of adding a bool to Connect, split out the notion of a
connection strategy. For now, the implementation remains the same,
but with the details hidden a bit. Later, we can improve that.
Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
2021-12-08 21:55:55 +00:00
|
|
|
c, err := safesocket.Connect(s)
|
2021-05-03 17:49:45 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
c.Close()
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitIPs() []netaddr.IP {
|
|
|
|
t := n.env.t
|
2021-05-12 04:57:25 +00:00
|
|
|
t.Helper()
|
2021-07-09 15:51:30 +00:00
|
|
|
var addrs []netaddr.IP
|
2021-05-12 04:57:25 +00:00
|
|
|
if err := tstest.WaitFor(20*time.Second, func() error {
|
2021-07-19 18:07:42 +00:00
|
|
|
cmd := n.Tailscale("ip")
|
|
|
|
cmd.Stdout = nil // in case --verbose-tailscale was set
|
|
|
|
cmd.Stderr = nil // in case --verbose-tailscale was set
|
|
|
|
out, err := cmd.Output()
|
2021-05-12 04:57:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-07-09 15:51:30 +00:00
|
|
|
ips := string(out)
|
|
|
|
ipslice := strings.Fields(ips)
|
|
|
|
addrs = make([]netaddr.IP, len(ipslice))
|
|
|
|
|
|
|
|
for i, ip := range ipslice {
|
|
|
|
netIP, err := netaddr.ParseIP(ip)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
addrs[i] = netIP
|
|
|
|
}
|
2021-05-12 04:57:25 +00:00
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Fatalf("awaiting an IP address: %v", err)
|
|
|
|
}
|
2021-07-09 15:51:30 +00:00
|
|
|
if len(addrs) == 0 {
|
2021-05-12 04:57:25 +00:00
|
|
|
t.Fatalf("returned IP address was blank")
|
|
|
|
}
|
2021-07-09 15:51:30 +00:00
|
|
|
return addrs
|
|
|
|
}
|
|
|
|
|
|
|
|
// AwaitIP returns the IP address of n.
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitIP() netaddr.IP {
|
|
|
|
t := n.env.t
|
2021-07-09 15:51:30 +00:00
|
|
|
t.Helper()
|
2021-12-16 01:05:21 +00:00
|
|
|
ips := n.AwaitIPs()
|
2021-07-09 15:51:30 +00:00
|
|
|
return ips[0]
|
2021-05-12 04:57:25 +00:00
|
|
|
}
|
|
|
|
|
2021-07-16 22:21:00 +00:00
|
|
|
// AwaitRunning waits for n to reach the IPN state "Running".
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitRunning() {
|
|
|
|
t := n.env.t
|
2021-05-12 04:57:25 +00:00
|
|
|
t.Helper()
|
|
|
|
if err := tstest.WaitFor(20*time.Second, func() error {
|
|
|
|
st, err := n.Status()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if st.BackendState != "Running" {
|
|
|
|
return fmt.Errorf("in state %q", st.BackendState)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Fatalf("failure/timeout waiting for transition to Running status: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-29 00:44:18 +00:00
|
|
|
// AwaitNeedsLogin waits for n to reach the IPN state "NeedsLogin".
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) AwaitNeedsLogin() {
|
|
|
|
t := n.env.t
|
2021-10-29 00:44:18 +00:00
|
|
|
t.Helper()
|
|
|
|
if err := tstest.WaitFor(20*time.Second, func() error {
|
|
|
|
st, err := n.Status()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if st.BackendState != "NeedsLogin" {
|
|
|
|
return fmt.Errorf("in state %q", st.BackendState)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Fatalf("failure/timeout waiting for transition to NeedsLogin status: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-01 03:27:05 +00:00
|
|
|
// Tailscale returns a command that runs the tailscale CLI with the provided arguments.
|
|
|
|
// It does not start the process.
|
|
|
|
func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
|
2021-07-20 20:55:09 +00:00
|
|
|
cmd := exec.Command(n.env.cli, "--socket="+n.sockFile)
|
2021-05-01 03:27:05 +00:00
|
|
|
cmd.Args = append(cmd.Args, arg...)
|
|
|
|
cmd.Dir = n.dir
|
2021-07-19 18:07:42 +00:00
|
|
|
cmd.Env = append(os.Environ(),
|
|
|
|
"TS_DEBUG_UP_FLAG_GOOS="+n.upFlagGOOS,
|
2021-07-20 21:10:11 +00:00
|
|
|
"TS_LOGS_DIR="+n.env.t.TempDir(),
|
2021-07-19 18:07:42 +00:00
|
|
|
)
|
|
|
|
if *verboseTailscale {
|
|
|
|
cmd.Stdout = os.Stdout
|
|
|
|
cmd.Stderr = os.Stderr
|
|
|
|
}
|
2021-05-01 03:27:05 +00:00
|
|
|
return cmd
|
2021-04-29 21:44:08 +00:00
|
|
|
}
|
|
|
|
|
2021-05-12 04:57:25 +00:00
|
|
|
func (n *testNode) Status() (*ipnstate.Status, error) {
|
2021-07-19 18:07:42 +00:00
|
|
|
cmd := n.Tailscale("status", "--json")
|
|
|
|
cmd.Stdout = nil // in case --verbose-tailscale was set
|
|
|
|
cmd.Stderr = nil // in case --verbose-tailscale was set
|
|
|
|
out, err := cmd.CombinedOutput()
|
2021-05-03 21:22:18 +00:00
|
|
|
if err != nil {
|
2021-05-12 04:57:25 +00:00
|
|
|
return nil, fmt.Errorf("running tailscale status: %v, %s", err, out)
|
2021-05-03 21:22:18 +00:00
|
|
|
}
|
|
|
|
st := new(ipnstate.Status)
|
|
|
|
if err := json.Unmarshal(out, st); err != nil {
|
2021-05-12 04:57:25 +00:00
|
|
|
return nil, fmt.Errorf("decoding tailscale status JSON: %w", err)
|
|
|
|
}
|
|
|
|
return st, nil
|
|
|
|
}
|
|
|
|
|
2021-12-16 01:05:21 +00:00
|
|
|
func (n *testNode) MustStatus() *ipnstate.Status {
|
|
|
|
tb := n.env.t
|
2021-05-12 04:57:25 +00:00
|
|
|
tb.Helper()
|
|
|
|
st, err := n.Status()
|
|
|
|
if err != nil {
|
|
|
|
tb.Fatal(err)
|
2021-05-03 21:22:18 +00:00
|
|
|
}
|
|
|
|
return st
|
|
|
|
}
|
|
|
|
|
|
|
|
// trafficTrap is an HTTP proxy handler to note whether any
// HTTP traffic tries to leave localhost from tailscaled. We don't
// expect any, so any request triggers a failure.
type trafficTrap struct {
	// atomicErr holds the first unexpected-request error recorded
	// (best effort; see ServeHTTP).
	atomicErr atomic.Value // of error
}
|
|
|
|
|
|
|
|
func (tt *trafficTrap) Err() error {
|
|
|
|
if err, ok := tt.atomicErr.Load().(error); ok {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
2021-05-01 03:27:05 +00:00
|
|
|
var got bytes.Buffer
|
|
|
|
r.Write(&got)
|
|
|
|
err := fmt.Errorf("unexpected HTTP proxy via proxy: %s", got.Bytes())
|
2021-05-03 17:49:45 +00:00
|
|
|
mainError.Store(err)
|
2021-05-03 21:22:18 +00:00
|
|
|
if tt.Err() == nil {
|
|
|
|
// Best effort at remembering the first request.
|
|
|
|
tt.atomicErr.Store(err)
|
|
|
|
}
|
2021-05-03 17:49:45 +00:00
|
|
|
log.Printf("Error: %v", err)
|
2021-05-03 21:22:18 +00:00
|
|
|
w.WriteHeader(403)
|
2021-05-03 17:49:45 +00:00
|
|
|
}
|
|
|
|
|
2021-05-12 04:57:25 +00:00
|
|
|
// authURLParserWriter is an io.Writer that accumulates the bytes
// written to it and invokes fn with the first auth URL that appears
// in the accumulated stream (see its Write method).
type authURLParserWriter struct {
	// buf accumulates written bytes until an auth URL is matched.
	buf bytes.Buffer
	// fn is called with each matched auth URL; a non-nil error
	// aborts the Write that found the match.
	fn func(urlStr string) error
}
|
|
|
|
|
|
|
|
// authURLRx matches an interactive-login auth URL in CLI/daemon output.
var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`)
|
|
|
|
|
|
|
|
func (w *authURLParserWriter) Write(p []byte) (n int, err error) {
|
|
|
|
n, err = w.buf.Write(p)
|
|
|
|
m := authURLRx.FindSubmatch(w.buf.Bytes())
|
|
|
|
if m != nil {
|
|
|
|
urlStr := string(m[1])
|
|
|
|
w.buf.Reset() // so it's not matched again
|
|
|
|
if err := w.fn(urlStr); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n, err
|
|
|
|
}
|