From 65f3dab4c6f1e6d121386cbf60e5c661beb7d5c1 Mon Sep 17 00:00:00 2001
From: David Anderson
Date: Wed, 9 Nov 2022 22:01:34 -0800
Subject: [PATCH] cmd/containerboot: make a tests table, add more tests.

Also fix bugs found while adding the tests, oops.

Signed-off-by: David Anderson
---
 cmd/containerboot/kube.go           |  16 +-
 cmd/containerboot/main.go           |  76 +++--
 cmd/containerboot/main_test.go      | 436 +++++++++++++++++++++++++---
 cmd/containerboot/test_tailscale.sh |   3 +-
 4 files changed, 445 insertions(+), 86 deletions(-)

diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go
index 3487d3c3f..db8a459c9 100644
--- a/cmd/containerboot/kube.go
+++ b/cmd/containerboot/kube.go
@@ -18,15 +18,14 @@
 	"log"
 	"net/http"
 	"os"
+	"path/filepath"
 	"strings"
-	"sync"
 	"time"
 )
 
 // findKeyInKubeSecret inspects the kube secret secretName for a data
 // field called "authkey", and returns its value if present.
 func findKeyInKubeSecret(ctx context.Context, secretName string) (string, error) {
-	kubeOnce.Do(initKube)
 	req, err := http.NewRequest("GET", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s", kubeNamespace, secretName), nil)
 	if err != nil {
 		return "", err
 	}
@@ -68,8 +67,6 @@ func findKeyInKubeSecret(ctx context.Context, secretName string) (string, error)
 // storeDeviceID writes deviceID into the "device_id" data field of
 // the kube secret secretName.
 func storeDeviceID(ctx context.Context, secretName, deviceID string) error {
-	kubeOnce.Do(initKube)
-
 	// First check if the secret exists at all. Even if running on
 	// kubernetes, we do not necessarily store state in a k8s secret.
 	req, err := http.NewRequest("GET", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s", kubeNamespace, secretName), nil)
@@ -109,7 +106,6 @@ func storeDeviceID(ctx context.Context, secretName, deviceID string) error {
 // deleteAuthKey deletes the 'authkey' field of the given kube
 // secret. No-op if there is no authkey in the secret.
 func deleteAuthKey(ctx context.Context, secretName string) error {
-	kubeOnce.Do(initKube)
 	// m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902.
 	m := []struct {
 		Op   string `json:"op"`
@@ -141,14 +137,13 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
 }
 
 var (
-	kubeOnce      sync.Once
 	kubeHost      string
 	kubeNamespace string
 	kubeToken     string
 	kubeHTTP      *http.Transport
 )
 
-func initKube() {
+func initKube(root string) {
 	// If running in Kubernetes, set things up so that doKubeRequest
 	// can talk successfully to the kube apiserver.
if os.Getenv("KUBERNETES_SERVICE_HOST") == "" { @@ -157,19 +152,19 @@ func initKube() { kubeHost = os.Getenv("KUBERNETES_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS") - bs, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + bs, err := os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/namespace")) if err != nil { log.Fatalf("Error reading kube namespace: %v", err) } kubeNamespace = strings.TrimSpace(string(bs)) - bs, err = os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") + bs, err = os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/token")) if err != nil { log.Fatalf("Error reading kube token: %v", err) } kubeToken = strings.TrimSpace(string(bs)) - bs, err = os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt") + bs, err = os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/ca.crt")) if err != nil { log.Fatalf("Error reading kube CA cert: %v", err) } @@ -185,7 +180,6 @@ func initKube() { // doKubeRequest sends r to the kube apiserver. func doKubeRequest(ctx context.Context, r *http.Request) (*http.Response, error) { - kubeOnce.Do(initKube) if kubeHTTP == nil { panic("not in kubernetes") } diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 43aa9a070..084b582a9 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -52,6 +52,7 @@ "os" "os/exec" "os/signal" + "path/filepath" "strconv" "strings" "syscall" @@ -81,6 +82,7 @@ func main() { HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), AuthOnce: defaultBool("TS_AUTH_ONCE", false), + Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), } if cfg.ProxyTo != "" && cfg.UserspaceMode { @@ -88,11 +90,11 @@ func main() { } if !cfg.UserspaceMode { - if err := ensureTunFile(); err != nil { + if err := ensureTunFile(cfg.Root); err != nil { log.Fatalf("Unable to create tuntap device file: %v", err) } if cfg.ProxyTo != "" || cfg.Routes != "" { - if err := ensureIPForwarding(cfg.ProxyTo, strings.Split(cfg.Routes, ",")); err != nil { + if err := ensureIPForwarding(cfg.Root, cfg.ProxyTo, cfg.Routes); err != nil { log.Printf("Failed to enable IP forwarding: %v", err) log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.") if cfg.InKubernetes { @@ -104,6 +106,10 @@ func main() { } } + if cfg.InKubernetes { + initKube(cfg.Root) + } + // Context is used for all setup stuff until we're in steady // state, so that if something is hanging we eventually time out // and crashloop the container. @@ -209,10 +215,12 @@ func startAndAuthTailscaled(ctx context.Context, cfg *settings) (*ipnstate.Statu break } + didLogin := false if !cfg.AuthOnce { if err := tailscaleUp(ctx, cfg); err != nil { return nil, 0, fmt.Errorf("couldn't log in: %v", err) } + didLogin = true } tsClient := tailscale.LocalClient{ @@ -243,17 +251,20 @@ func startAndAuthTailscaled(ctx context.Context, cfg *settings) (*ipnstate.Statu } log.Printf("No Tailscale IPs assigned yet") case "NeedsLogin": - // Alas, we cannot currently trigger an authkey login from - // LocalAPI, so we still have to shell out to the - // tailscale CLI for this bit. - if err := tailscaleUp(ctx, cfg); err != nil { - return nil, 0, fmt.Errorf("couldn't log in: %v", err) + if !didLogin { + // Alas, we cannot currently trigger an authkey login from + // LocalAPI, so we still have to shell out to the + // tailscale CLI for this bit. 
+				if err := tailscaleUp(ctx, cfg); err != nil {
+					return nil, 0, fmt.Errorf("couldn't log in: %v", err)
+				}
+				didLogin = true
 			}
 		default:
 			log.Printf("tailscaled in state %q, waiting", st.BackendState)
 		}
-		time.Sleep(500 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
 	}
 }
@@ -271,7 +282,7 @@ func tailscaledArgs(cfg *settings) []string {
 
 	if cfg.UserspaceMode {
 		args = append(args, "--tun=userspace-networking")
-	} else if err := ensureTunFile(); err != nil {
+	} else if err := ensureTunFile(cfg.Root); err != nil {
 		log.Fatalf("ensuring that /dev/net/tun exists: %v", err)
 	}
 
@@ -316,17 +327,17 @@ func tailscaleUp(ctx context.Context, cfg *settings) error {
 
 // ensureTunFile checks that /dev/net/tun exists, creating it if
 // missing.
-func ensureTunFile() error {
+func ensureTunFile(root string) error {
 	// Verify that /dev/net/tun exists, in some container envs it
 	// needs to be mknod-ed.
-	if _, err := os.Stat("/dev/net"); errors.Is(err, fs.ErrNotExist) {
-		if err := os.MkdirAll("/dev/net", 0755); err != nil {
+	if _, err := os.Stat(filepath.Join(root, "dev/net")); errors.Is(err, fs.ErrNotExist) {
+		if err := os.MkdirAll(filepath.Join(root, "dev/net"), 0755); err != nil {
 			return err
 		}
 	}
-	if _, err := os.Stat("/dev/net/tun"); errors.Is(err, fs.ErrNotExist) {
+	if _, err := os.Stat(filepath.Join(root, "dev/net/tun")); errors.Is(err, fs.ErrNotExist) {
 		dev := unix.Mkdev(10, 200) // tuntap major and minor
-		if err := unix.Mknod("/dev/net/tun", 0600|unix.S_IFCHR, int(dev)); err != nil {
+		if err := unix.Mknod(filepath.Join(root, "dev/net/tun"), 0600|unix.S_IFCHR, int(dev)); err != nil {
 			return err
 		}
 	}
@@ -334,37 +345,41 @@
 }
 
 // ensureIPForwarding enables IPv4/IPv6 forwarding for the container.
-func ensureIPForwarding(proxyTo string, routes []string) error {
+func ensureIPForwarding(root, proxyTo, routes string) error {
 	var (
 		v4Forwarding, v6Forwarding bool
 	)
-	proxyIP, err := netip.ParseAddr(proxyTo)
-	if err != nil {
-		return fmt.Errorf("invalid proxy destination IP: %v", err)
-	}
-	if proxyIP.Is4() {
-		v4Forwarding = true
-	} else {
-		v6Forwarding = true
-	}
-	for _, route := range routes {
-		cidr, err := netip.ParsePrefix(route)
+	if proxyTo != "" {
+		proxyIP, err := netip.ParseAddr(proxyTo)
 		if err != nil {
-			return fmt.Errorf("invalid subnet route: %v", err)
+			return fmt.Errorf("invalid proxy destination IP: %v", err)
 		}
-		if cidr.Addr().Is4() {
+		if proxyIP.Is4() {
 			v4Forwarding = true
 		} else {
 			v6Forwarding = true
 		}
 	}
+	if routes != "" {
+		for _, route := range strings.Split(routes, ",") {
+			cidr, err := netip.ParsePrefix(route)
+			if err != nil {
+				return fmt.Errorf("invalid subnet route: %v", err)
+			}
+			if cidr.Addr().Is4() {
+				v4Forwarding = true
+			} else {
+				v6Forwarding = true
+			}
+		}
+	}
 
 	var paths []string
 	if v4Forwarding {
-		paths = append(paths, "/proc/sys/net/ipv4/ip_forward")
+		paths = append(paths, filepath.Join(root, "proc/sys/net/ipv4/ip_forward"))
 	}
 	if v6Forwarding {
-		paths = append(paths, "/proc/sys/net/ipv6/conf/all/forwarding")
+		paths = append(paths, filepath.Join(root, "proc/sys/net/ipv6/conf/all/forwarding"))
 	}
 
 	// In some common configurations (e.g. default docker,
@@ -432,6 +447,7 @@ type settings struct {
 	HTTPProxyAddr string
 	Socket        string
 	AuthOnce      bool
+	Root          string
 }
 
 // defaultEnv returns the value of the given envvar name, or defVal if
diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go
index 4b0fd43b8..0dba2005c 100644
--- a/cmd/containerboot/main_test.go
+++ b/cmd/containerboot/main_test.go
@@ -23,20 +23,20 @@
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"sync"
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"golang.org/x/sys/unix"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/tailcfg"
 )
 
 func TestContainerBoot(t *testing.T) {
-	d, err := os.MkdirTemp("", "containerboot")
-	if err != nil {
-		t.Fatal(err)
-	}
+	d := t.TempDir()
 
 	lapi := localAPI{FSRoot: d}
 	if err := lapi.Start(); err != nil {
@@ -50,17 +50,39 @@ func TestContainerBoot(t *testing.T) {
 	}
 	defer kube.Close()
 
-	for _, path := range []string{"var/lib", "usr/bin", "tmp"} {
+	dirs := []string{
+		"var/lib",
+		"usr/bin",
+		"tmp",
+		"dev/net",
+		"proc/sys/net/ipv4",
+		"proc/sys/net/ipv6/conf/all",
+	}
+	for _, path := range dirs {
 		if err := os.MkdirAll(filepath.Join(d, path), 0700); err != nil {
 			t.Fatal(err)
 		}
 	}
-	if err := os.WriteFile(filepath.Join(d, "usr/bin/tailscaled"), fakeTailscaled, 0700); err != nil {
-		t.Fatal(err)
+	files := map[string][]byte{
+		"usr/bin/tailscaled":                    fakeTailscaled,
+		"usr/bin/tailscale":                     fakeTailscale,
+		"usr/bin/iptables":                      fakeTailscale,
+		"usr/bin/ip6tables":                     fakeTailscale,
+		"dev/net/tun":                           []byte(""),
+		"proc/sys/net/ipv4/ip_forward":          []byte("0"),
+		"proc/sys/net/ipv6/conf/all/forwarding": []byte("0"),
 	}
-	if err := os.WriteFile(filepath.Join(d, "usr/bin/tailscale"), fakeTailscale, 0700); err != nil {
-		t.Fatal(err)
+	resetFiles := func() {
+		for path, content := range files {
+			// Making everything executable is a little weird, but the
+			// stuff that doesn't need to be executable doesn't care if we
+			// do make it executable.
+			if err := os.WriteFile(filepath.Join(d, path), content, 0700); err != nil {
+				t.Fatal(err)
+			}
+		}
 	}
+	resetFiles()
 
 	boot := filepath.Join(d, "containerboot")
 	if err := exec.Command("go", "build", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil {
@@ -68,41 +90,366 @@
 	argFile := filepath.Join(d, "args")
+	tsIPs := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
+	runningSockPath := filepath.Join(d, "tmp/tailscaled.sock")
 
-	lapi.Reset()
-	kube.Reset()
-
-	cmd := exec.Command(boot)
-	cmd.Env = []string{
-		fmt.Sprintf("PATH=%s/usr/bin:%s", d, os.Getenv("PATH")),
-		fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", argFile),
-		fmt.Sprintf("TS_TEST_SOCKET=%s", lapi.Path),
-		fmt.Sprintf("TS_SOCKET=%s", filepath.Join(d, "tmp/tailscaled.sock")),
-	}
-	cbOut := &lockingBuffer{}
-	cmd.Stderr = cbOut
-	if err := cmd.Start(); err != nil {
-		t.Fatalf("starting containerboot: %v", err)
-	}
-	defer func() {
-		cmd.Process.Signal(unix.SIGTERM)
-		cmd.Process.Wait()
-	}()
-
-	want := `
-/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking
-/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false
-`
-	waitArgs(t, 2*time.Second, d, argFile, want)
-
-	lapi.SetStatus(ipnstate.Status{
-		BackendState: "Running",
-		TailscaleIPs: []netip.Addr{
-			netip.MustParseAddr("100.64.0.1"),
+	// TODO: refactor this 1-2 stuff if we ever need a third
+	// step. Right now all of containerboot's modes either converge
+	// with no further interaction needed, or with one extra step
+	// only.
+	tests := []struct {
+		Name           string
+		Env            map[string]string
+		KubeSecret     map[string]string
+		WantArgs1      []string          // Wait for containerboot to run these commands...
+		Status1        ipnstate.Status   // ... then report this status in LocalAPI.
+		WantArgs2      []string          // If non-nil, wait for containerboot to run these additional commands...
+		Status2        ipnstate.Status   // ... then report this status in LocalAPI.
+		WantKubeSecret map[string]string
+		WantFiles      map[string]string
+	}{
+		{
+			// Out of the box default: runs in userspace mode, ephemeral storage, interactive login.
+			Name: "no_args",
+			Env:  nil,
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
+			},
+			// The tailscale up call blocks until auth is complete, so
+			// by the time it returns the next converged state is
+			// Running.
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
 		},
-	})
+		{
+			// Userspace mode, ephemeral storage, authkey provided on every run.
+			Name: "authkey",
+			Env: map[string]string{
+				"TS_AUTH_KEY": "tskey-key",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+		},
+		{
+			Name: "authkey_disk_state",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_STATE_DIR": filepath.Join(d, "tmp"),
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=/tmp --tun=userspace-networking",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+		},
+		{
+			Name: "routes",
+			Env: map[string]string{
+				"TS_AUTH_KEY": "tskey-key",
+				"TS_ROUTES":   "1.2.3.0/24,10.20.30.0/24",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+			WantFiles: map[string]string{
+				"proc/sys/net/ipv4/ip_forward":          "0",
+				"proc/sys/net/ipv6/conf/all/forwarding": "0",
+			},
+		},
+		{
+			Name: "routes_kernel_ipv4",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_ROUTES":    "1.2.3.0/24,10.20.30.0/24",
+				"TS_USERSPACE": "false",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=1.2.3.0/24,10.20.30.0/24",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+			WantFiles: map[string]string{
+				"proc/sys/net/ipv4/ip_forward":          "1",
+				"proc/sys/net/ipv6/conf/all/forwarding": "0",
+			},
+		},
+		{
+			Name: "routes_kernel_ipv6",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_ROUTES":    "::/64,1::/64",
+				"TS_USERSPACE": "false",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1::/64",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+			WantFiles: map[string]string{
+				"proc/sys/net/ipv4/ip_forward":          "0",
+				"proc/sys/net/ipv6/conf/all/forwarding": "1",
+			},
+		},
+		{
+			Name: "routes_kernel_all_families",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_ROUTES":    "::/64,1.2.3.0/24",
+				"TS_USERSPACE": "false",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key --advertise-routes=::/64,1.2.3.0/24",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+			WantFiles: map[string]string{
+				"proc/sys/net/ipv4/ip_forward":          "1",
+				"proc/sys/net/ipv6/conf/all/forwarding": "1",
+			},
+		},
+		{
+			Name: "proxy",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_DEST_IP":   "1.2.3.4",
+				"TS_USERSPACE": "false",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+			WantArgs2: []string{
+				"/usr/bin/iptables -t nat -I PREROUTING 1 -d 100.64.0.1 -j DNAT --to-destination 1.2.3.4",
+			},
+			Status2: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+		},
+		{
+			Name: "authkey_once",
+			Env: map[string]string{
+				"TS_AUTH_KEY":  "tskey-key",
+				"TS_AUTH_ONCE": "true",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "NeedsLogin",
+			},
+			WantArgs2: []string{
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+			},
+			Status2: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+			},
+		},
+		{
+			Name: "kube_storage",
+			Env: map[string]string{
+				"KUBERNETES_SERVICE_HOST":       kube.Host,
+				"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
+			},
+			KubeSecret: map[string]string{
+				"authkey": "tskey-key",
+			},
+			WantArgs1: []string{
+				"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
+				"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+			},
+			Status1: ipnstate.Status{
+				BackendState: "Running",
+				TailscaleIPs: tsIPs,
+				Self: &ipnstate.PeerStatus{
+					ID: tailcfg.StableNodeID("myID"),
+				},
+			},
+			WantKubeSecret: map[string]string{
+				"authkey":   "tskey-key",
+				"device_id": "myID",
+			},
+		},
+		{
+			// Same as previous, but deletes the authkey from the kube secret.
+ Name: "kube_storage_auth_once", + Env: map[string]string{ + "KUBERNETES_SERVICE_HOST": kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": kube.Port, + "TS_AUTH_ONCE": "true", + }, + KubeSecret: map[string]string{ + "authkey": "tskey-key", + }, + WantArgs1: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", + }, + Status1: ipnstate.Status{ + BackendState: "NeedsLogin", + }, + WantArgs2: []string{ + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + Status2: ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: tsIPs, + Self: &ipnstate.PeerStatus{ + ID: tailcfg.StableNodeID("myID"), + }, + }, + WantKubeSecret: map[string]string{ + "device_id": "myID", + }, + }, + { + Name: "proxies", + Env: map[string]string{ + "TS_SOCKS5_SERVER": "localhost:1080", + "TS_OUTBOUND_HTTP_PROXY_LISTEN": "localhost:8080", + }, + WantArgs1: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --socks5-server=localhost:1080 --outbound-http-proxy-listen=localhost:8080", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false", + }, + // The tailscale up call blocks until auth is complete, so + // by the time it returns the next converged state is + // Running. + Status1: ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: tsIPs, + }, + }, + { + Name: "dns", + Env: map[string]string{ + "TS_ACCEPT_DNS": "true", + }, + WantArgs1: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true", + }, + Status1: ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: tsIPs, + }, + }, + { + Name: "extra_args", + Env: map[string]string{ + "TS_EXTRA_ARGS": "--widget=rotated", + "TS_TAILSCALED_EXTRA_ARGS": "--experiments=widgets", + }, + WantArgs1: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking --experiments=widgets", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --widget=rotated", + }, + Status1: ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: tsIPs, + }, + }, + } - waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal") + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + lapi.Reset() + kube.Reset() + os.Remove(argFile) + os.Remove(runningSockPath) + resetFiles() + + for k, v := range test.KubeSecret { + kube.SetSecret(k, v) + } + + cmd := exec.Command(boot) + cmd.Env = []string{ + fmt.Sprintf("PATH=%s/usr/bin:%s", d, os.Getenv("PATH")), + fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", argFile), + fmt.Sprintf("TS_TEST_SOCKET=%s", lapi.Path), + fmt.Sprintf("TS_SOCKET=%s", runningSockPath), + fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", d), + } + for k, v := range test.Env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + cbOut := &lockingBuffer{} + defer func() { + if t.Failed() { + t.Logf("containerboot output:\n%s", cbOut.String()) + } + }() + cmd.Stderr = cbOut + if err := cmd.Start(); err != nil { + t.Fatalf("starting containerboot: %v", err) + } + defer func() { + cmd.Process.Signal(unix.SIGTERM) + cmd.Process.Wait() + }() + + waitArgs(t, 2*time.Second, d, argFile, strings.Join(test.WantArgs1, "\n")) + lapi.SetStatus(test.Status1) + if test.WantArgs2 != nil { + waitArgs(t, 2*time.Second, d, argFile, 
strings.Join(append(test.WantArgs1, test.WantArgs2...), "\n")) + lapi.SetStatus(test.Status2) + } + waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal") + + if test.WantKubeSecret != nil { + got := kube.Secret() + if diff := cmp.Diff(got, test.WantKubeSecret); diff != "" { + t.Fatalf("unexpected kube secret data (-got+want):\n%s", diff) + } + } else { + got := kube.Secret() + if len(got) != 0 { + t.Fatalf("kube secret unexpectedly not empty, got %#v", got) + } + } + + for path, want := range test.WantFiles { + gotBs, err := os.ReadFile(filepath.Join(d, path)) + if err != nil { + t.Fatalf("reading wanted file %q: %v", path, err) + } + if got := strings.TrimSpace(string(gotBs)); got != want { + t.Errorf("wrong file contents for %q, got %q want %q", path, got, want) + } + } + }) + } } type lockingBuffer struct { @@ -259,8 +606,8 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { // kube secret, and panics on all other uses to make it very obvious // that something unexpected happened. type kubeServer struct { - FSRoot string - Addr string // populated by Start + FSRoot string + Host, Port string // populated by Start srv *httptest.Server @@ -305,7 +652,8 @@ func (k *kubeServer) Start() error { } k.srv = httptest.NewTLSServer(k) - k.Addr = k.srv.Listener.Addr().String() + k.Host = k.srv.Listener.Addr().(*net.TCPAddr).IP.String() + k.Port = strconv.Itoa(k.srv.Listener.Addr().(*net.TCPAddr).Port) var cert bytes.Buffer if err := pem.Encode(&cert, &pem.Block{Type: "CERTIFICATE", Bytes: k.srv.Certificate().Raw}); err != nil { diff --git a/cmd/containerboot/test_tailscale.sh b/cmd/containerboot/test_tailscale.sh index d3a4e364f..1fa10abb1 100644 --- a/cmd/containerboot/test_tailscale.sh +++ b/cmd/containerboot/test_tailscale.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash # -# This is a fake tailscale CLI that records its arguments and exits successfully. +# This is a fake tailscale CLI (and also iptables and ip6tables) that +# records its arguments and exits successfully. # # It is used by main_test.go to test the behavior of containerboot.
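
A note on the testing technique in this patch: containerboot becomes hermetically testable because every host path it touches (/dev/net/tun, the /proc/sys forwarding knobs, the kube serviceaccount files) is now derived from a single configurable root, TS_TEST_ONLY_ROOT, which defaults to "/" in production and points at a per-test t.TempDir() in main_test.go. A minimal sketch of the same pattern follows; the names hostPath and enableV4Forwarding are illustrative, not code from this patch:

	package main

	import (
		"os"
		"path/filepath"
	)

	// hostPath resolves a root-relative path such as
	// "proc/sys/net/ipv4/ip_forward" against a configurable root.
	// With root set to "/" this yields the real host path; with root
	// set to a test's temp dir, reads and writes land in ordinary
	// files that the test pre-creates and can inspect afterwards.
	func hostPath(root, rel string) string {
		return filepath.Join(root, rel)
	}

	// enableV4Forwarding shows the payoff: the test can assert on the
	// file's contents instead of needing a real, writable /proc.
	func enableV4Forwarding(root string) error {
		return os.WriteFile(hostPath(root, "proc/sys/net/ipv4/ip_forward"), []byte("1"), 0644)
	}

The paths are deliberately written without a leading slash: filepath.Join(root, "/proc/...") would produce the same result after cleaning, but the relative form makes it explicit that every path is resolved against root rather than the filesystem root.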
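The WantArgs1/Status1 and WantArgs2/Status2 pairs encode the two-step convergence mentioned in the table's TODO comment: the fake LocalAPI serves whatever status the test last stored, and containerboot polls it (now every 100ms) until the reported BackendState lets it take its next action, so each test step is "wait until the child has run these commands, then change the world it observes". A sketch of the deadline-polling helper this relies on, in roughly the shape of the test's waitArgs/waitLogLine (the name waitFor is illustrative, not from this patch):

	package main

	import (
		"testing"
		"time"
	)

	// waitFor polls cond until it returns true or the deadline
	// expires, mirroring how the test waits for the recorded argument
	// file to reach the expected contents before it advances the fake
	// LocalAPI status to the next stage.
	func waitFor(t *testing.T, timeout time.Duration, cond func() bool) {
		t.Helper()
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			if cond() {
				return
			}
			time.Sleep(50 * time.Millisecond)
		}
		t.Fatal("timed out waiting for condition")
	}

Because the child process and the test communicate only through files and the fake HTTP servers, polling with a deadline keeps the table-driven cases deterministic without fixed sleeps.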