Merge 2bf51e5bc5f1beb92255fe239b076103e67ec9c1 into 156cd53e7734407dc42e30af2f12cf6956cd9e24

This commit is contained in:
Tom Proctor 2025-03-24 11:18:11 -07:00 committed by GitHub
commit 8dafd6cfd9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 855 additions and 805 deletions

View File

@ -41,23 +41,34 @@ import (
"tailscale.com/types/ptr"
)
func TestContainerBoot(t *testing.T) {
// testEnv represents the environment needed for a single sub-test so that tests
// can run in parallel. Each test gets its own fake kube API server, fake local
// tailscaled API server, temp dir, and pre-allocated ports, so no state is
// shared between parallel sub-tests.
type testEnv struct {
	kube            *kubeServer // Fake kube server.
	lapi            *localAPI   // Local TS API server.
	d               string      // Temp dir for the specific test.
	argFile         string      // File with commands test_tailscale{,d}.sh were invoked with.
	runningSockPath string      // Path to the running tailscaled socket.
	localAddrPort   int         // Port for the containerboot HTTP server.
	healthAddrPort  int         // Port for the (deprecated) containerboot health server.
}
func newTestEnv(t *testing.T) testEnv {
d := t.TempDir()
lapi := localAPI{FSRoot: d}
if err := lapi.Start(); err != nil {
t.Fatal(err)
}
defer lapi.Close()
t.Cleanup(lapi.Close)
kube := kubeServer{FSRoot: d}
kube.Start(t)
defer kube.Close()
t.Cleanup(kube.Close)
tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"}
serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}}
egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net")
egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net")
dirs := []string{
"var/lib",
@ -86,7 +97,6 @@ func TestContainerBoot(t *testing.T) {
filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg),
filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"),
}
resetFiles := func() {
for path, content := range files {
// Making everything executable is a little weird, but the
// stuff that doesn't need to be executable doesn't care if we
@ -95,13 +105,6 @@ func TestContainerBoot(t *testing.T) {
t.Fatal(err)
}
}
}
resetFiles()
boot := filepath.Join(d, "containerboot")
if err := exec.Command("go", "build", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil {
t.Fatalf("Building containerboot: %v", err)
}
argFile := filepath.Join(d, "args")
runningSockPath := filepath.Join(d, "tmp/tailscaled.sock")
@ -117,6 +120,25 @@ func TestContainerBoot(t *testing.T) {
port := ln.Addr().(*net.TCPAddr).Port
*p = port
}
return testEnv{
kube: &kube,
lapi: &lapi,
d: d,
argFile: argFile,
runningSockPath: runningSockPath,
localAddrPort: localAddrPort,
healthAddrPort: healthAddrPort,
}
}
func TestContainerBoot(t *testing.T) {
boot := filepath.Join(t.TempDir(), "containerboot")
if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil {
t.Fatalf("Building containerboot: %v", err)
}
egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net")
metricsURL := func(port int) string {
return fmt.Sprintf("http://127.0.0.1:%d/metrics", port)
}
@ -173,16 +195,16 @@ func TestContainerBoot(t *testing.T) {
}).View(),
},
}
tests := []struct {
Name string
type testCase struct {
Env map[string]string
KubeSecret map[string]string
KubeDenyPatch bool
Phases []phase
}{
{
}
tests := map[string]func(env *testEnv) testCase{
"no_args": func(env *testEnv) testCase {
return testCase{
// Out of the box default: runs in userspace mode, ephemeral storage, interactive login.
Name: "no_args",
Env: nil,
Phases: []phase{
{
@ -200,10 +222,11 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
"authkey": func(env *testEnv) testCase {
return testCase{
// Userspace mode, ephemeral storage, authkey provided on every run.
Name: "authkey",
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
},
@ -218,10 +241,11 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
"authkey-old-flag": func(env *testEnv) testCase {
return testCase{
// Userspace mode, ephemeral storage, authkey provided on every run.
Name: "authkey-old-flag",
Env: map[string]string{
"TS_AUTH_KEY": "tskey-key",
},
@ -236,12 +260,13 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "authkey_disk_state",
"authkey_disk_state": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_STATE_DIR": filepath.Join(d, "tmp"),
"TS_STATE_DIR": filepath.Join(env.d, "tmp"),
},
Phases: []phase{
{
@ -254,9 +279,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "routes",
"routes": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_ROUTES": "1.2.3.0/24,10.20.30.0/24",
@ -276,9 +302,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "empty routes",
"empty routes": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_ROUTES": "",
@ -298,9 +325,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "routes_kernel_ipv4",
"routes_kernel_ipv4": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_ROUTES": "1.2.3.0/24,10.20.30.0/24",
@ -321,9 +349,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "routes_kernel_ipv6",
"routes_kernel_ipv6": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_ROUTES": "::/64,1::/64",
@ -344,9 +373,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "routes_kernel_all_families",
"routes_kernel_all_families": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_ROUTES": "::/64,1.2.3.0/24",
@ -367,9 +397,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "ingress proxy",
"ingress proxy": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_DEST_IP": "1.2.3.4",
@ -386,9 +417,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "egress proxy",
"egress proxy": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_TAILNET_TARGET_IP": "100.99.99.99",
@ -409,9 +441,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "egress_proxy_fqdn_ipv6_target_on_ipv4_host",
"egress_proxy_fqdn_ipv6_target_on_ipv4_host": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net", // resolves to IPv6 address
@ -451,9 +484,10 @@ func TestContainerBoot(t *testing.T) {
WantExitCode: ptr.To(1),
},
},
}
},
{
Name: "authkey_once",
"authkey_once": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_AUTHKEY": "tskey-key",
"TS_AUTH_ONCE": "true",
@ -479,12 +513,13 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "kube_storage",
"kube_storage": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
},
KubeSecret: map[string]string{
"authkey": "tskey-key",
@ -510,15 +545,16 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "kube_disk_storage",
"kube_disk_storage": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
// Explicitly set to an empty value, to override the default of "tailscale".
"TS_KUBE_SECRET": "",
"TS_STATE_DIR": filepath.Join(d, "tmp"),
"TS_STATE_DIR": filepath.Join(env.d, "tmp"),
"TS_AUTHKEY": "tskey-key",
},
KubeSecret: map[string]string{},
@ -535,12 +571,13 @@ func TestContainerBoot(t *testing.T) {
WantKubeSecret: map[string]string{},
},
},
}
},
{
Name: "kube_storage_no_patch",
"kube_storage_no_patch": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
"TS_AUTHKEY": "tskey-key",
},
KubeSecret: map[string]string{},
@ -558,13 +595,14 @@ func TestContainerBoot(t *testing.T) {
WantKubeSecret: map[string]string{},
},
},
}
},
{
"kube_storage_auth_once": func(env *testEnv) testCase {
return testCase{
// Same as previous, but deletes the authkey from the kube secret.
Name: "kube_storage_auth_once",
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
"TS_AUTH_ONCE": "true",
},
KubeSecret: map[string]string{
@ -603,12 +641,13 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "kube_storage_updates",
"kube_storage_updates": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
},
KubeSecret: map[string]string{
"authkey": "tskey-key",
@ -653,9 +692,10 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "proxies",
"proxies": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_SOCKS5_SERVER": "localhost:1080",
"TS_OUTBOUND_HTTP_PROXY_LISTEN": "localhost:8080",
@ -671,9 +711,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "dns",
"dns": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_ACCEPT_DNS": "true",
},
@ -688,9 +729,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "extra_args",
"extra_args": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_EXTRA_ARGS": "--widget=rotated",
"TS_TAILSCALED_EXTRA_ARGS": "--experiments=widgets",
@ -705,9 +747,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "extra_args_accept_routes",
"extra_args_accept_routes": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_EXTRA_ARGS": "--accept-routes",
},
@ -721,9 +764,10 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "hostname",
"hostname": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_HOSTNAME": "my-server",
},
@ -737,11 +781,12 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "experimental tailscaled config path",
"experimental tailscaled config path": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(d, "etc/tailscaled/"),
"TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"),
},
Phases: []phase{
{
@ -752,11 +797,12 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "metrics_enabled",
"metrics_enabled": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort),
"TS_ENABLE_METRICS": "true",
},
Phases: []phase{
@ -766,18 +812,19 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): 200,
healthURL(localAddrPort): -1,
metricsURL(env.localAddrPort): 200,
healthURL(env.localAddrPort): -1,
},
}, {
Notify: runningNotify,
},
},
}
},
{
Name: "health_enabled",
"health_enabled": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort),
"TS_ENABLE_HEALTH_CHECK": "true",
},
Phases: []phase{
@ -787,22 +834,23 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): -1,
healthURL(localAddrPort): 503, // Doesn't start passing until the next phase.
metricsURL(env.localAddrPort): -1,
healthURL(env.localAddrPort): 503, // Doesn't start passing until the next phase.
},
}, {
Notify: runningNotify,
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): -1,
healthURL(localAddrPort): 200,
metricsURL(env.localAddrPort): -1,
healthURL(env.localAddrPort): 200,
},
},
},
}
},
{
Name: "metrics_and_health_on_same_port",
"metrics_and_health_on_same_port": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort),
"TS_ENABLE_METRICS": "true",
"TS_ENABLE_HEALTH_CHECK": "true",
},
@ -813,24 +861,25 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): 200,
healthURL(localAddrPort): 503, // Doesn't start passing until the next phase.
metricsURL(env.localAddrPort): 200,
healthURL(env.localAddrPort): 503, // Doesn't start passing until the next phase.
},
}, {
Notify: runningNotify,
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): 200,
healthURL(localAddrPort): 200,
metricsURL(env.localAddrPort): 200,
healthURL(env.localAddrPort): 200,
},
},
},
}
},
{
Name: "local_metrics_and_deprecated_health",
"local_metrics_and_deprecated_health": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort),
"TS_ENABLE_METRICS": "true",
"TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", healthAddrPort),
"TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", env.healthAddrPort),
},
Phases: []phase{
{
@ -839,22 +888,23 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): 200,
healthURL(healthAddrPort): 503, // Doesn't start passing until the next phase.
metricsURL(env.localAddrPort): 200,
healthURL(env.healthAddrPort): 503, // Doesn't start passing until the next phase.
},
}, {
Notify: runningNotify,
EndpointStatuses: map[string]int{
metricsURL(localAddrPort): 200,
healthURL(healthAddrPort): 200,
metricsURL(env.localAddrPort): 200,
healthURL(env.healthAddrPort): 200,
},
},
},
}
},
{
Name: "serve_config_no_kube",
"serve_config_no_kube": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"),
"TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config.json"),
"TS_AUTHKEY": "tskey-key",
},
Phases: []phase{
@ -868,13 +918,14 @@ func TestContainerBoot(t *testing.T) {
Notify: runningNotify,
},
},
}
},
{
Name: "serve_config_kube",
"serve_config_kube": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"TS_SERVE_CONFIG": filepath.Join(d, "etc/tailscaled/serve-config.json"),
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
"TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config.json"),
},
KubeSecret: map[string]string{
"authkey": "tskey-key",
@ -901,14 +952,15 @@ func TestContainerBoot(t *testing.T) {
},
},
},
}
},
{
Name: "egress_svcs_config_kube",
"egress_svcs_config_kube": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(env.d, "etc/tailscaled"),
"TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", env.localAddrPort),
},
KubeSecret: map[string]string{
"authkey": "tskey-key",
@ -923,7 +975,7 @@ func TestContainerBoot(t *testing.T) {
"authkey": "tskey-key",
},
EndpointStatuses: map[string]int{
egressSvcTerminateURL(localAddrPort): 200,
egressSvcTerminateURL(env.localAddrPort): 200,
},
},
{
@ -937,15 +989,16 @@ func TestContainerBoot(t *testing.T) {
"tailscale_capver": capver,
},
EndpointStatuses: map[string]int{
egressSvcTerminateURL(localAddrPort): 200,
egressSvcTerminateURL(env.localAddrPort): 200,
},
},
},
}
},
{
Name: "egress_svcs_config_no_kube",
"egress_svcs_config_no_kube": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(d, "etc/tailscaled"),
"TS_EGRESS_PROXIES_CONFIG_PATH": filepath.Join(env.d, "etc/tailscaled"),
"TS_AUTHKEY": "tskey-key",
},
Phases: []phase{
@ -954,12 +1007,13 @@ func TestContainerBoot(t *testing.T) {
WantExitCode: ptr.To(1),
},
},
}
},
{
Name: "kube_shutdown_during_state_write",
"kube_shutdown_during_state_write": func(env *testEnv) testCase {
return testCase{
Env: map[string]string{
"KUBERNETES_SERVICE_HOST": kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
"KUBERNETES_SERVICE_HOST": env.kube.Host,
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
"TS_ENABLE_HEALTH_CHECK": "true",
},
KubeSecret: map[string]string{
@ -1010,32 +1064,31 @@ func TestContainerBoot(t *testing.T) {
WantExitCode: ptr.To(0),
},
},
}
},
}
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
lapi.Reset()
kube.Reset()
os.Remove(argFile)
os.Remove(runningSockPath)
resetFiles()
for name, test := range tests {
t.Run(name, func(t *testing.T) {
t.Parallel()
env := newTestEnv(t)
tc := test(&env)
for k, v := range test.KubeSecret {
kube.SetSecret(k, v)
for k, v := range tc.KubeSecret {
env.kube.SetSecret(k, v)
}
kube.SetPatching(!test.KubeDenyPatch)
env.kube.SetPatching(!tc.KubeDenyPatch)
cmd := exec.Command(boot)
cmd.Env = []string{
fmt.Sprintf("PATH=%s/usr/bin:%s", d, os.Getenv("PATH")),
fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", argFile),
fmt.Sprintf("TS_TEST_SOCKET=%s", lapi.Path),
fmt.Sprintf("TS_SOCKET=%s", runningSockPath),
fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", d),
fmt.Sprintf("PATH=%s/usr/bin:%s", env.d, os.Getenv("PATH")),
fmt.Sprintf("TS_TEST_RECORD_ARGS=%s", env.argFile),
fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path),
fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath),
fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d),
fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"),
}
for k, v := range test.Env {
for k, v := range tc.Env {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
}
cbOut := &lockingBuffer{}
@ -1045,6 +1098,7 @@ func TestContainerBoot(t *testing.T) {
}
}()
cmd.Stderr = cbOut
cmd.Stdout = cbOut
if err := cmd.Start(); err != nil {
t.Fatalf("starting containerboot: %v", err)
}
@ -1054,11 +1108,11 @@ func TestContainerBoot(t *testing.T) {
}()
var wantCmds []string
for i, p := range test.Phases {
for i, p := range tc.Phases {
for k, v := range p.UpdateKubeSecret {
kube.SetSecret(k, v)
env.kube.SetSecret(k, v)
}
lapi.Notify(p.Notify)
env.lapi.Notify(p.Notify)
if p.Signal != nil {
cmd.Process.Signal(*p.Signal)
}
@ -1086,15 +1140,15 @@ func TestContainerBoot(t *testing.T) {
}
wantCmds = append(wantCmds, p.WantCmds...)
waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n"))
waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n"))
err := tstest.WaitFor(2*time.Second, func() error {
if p.WantKubeSecret != nil {
got := kube.Secret()
got := env.kube.Secret()
if diff := cmp.Diff(got, p.WantKubeSecret); diff != "" {
return fmt.Errorf("unexpected kube secret data (-got+want):\n%s", diff)
}
} else {
got := kube.Secret()
got := env.kube.Secret()
if len(got) > 0 {
return fmt.Errorf("kube secret unexpectedly not empty, got %#v", got)
}
@ -1106,7 +1160,7 @@ func TestContainerBoot(t *testing.T) {
}
err = tstest.WaitFor(2*time.Second, func() error {
for path, want := range p.WantFiles {
gotBs, err := os.ReadFile(filepath.Join(d, path))
gotBs, err := os.ReadFile(filepath.Join(env.d, path))
if err != nil {
return fmt.Errorf("reading wanted file %q: %v", path, err)
}
@ -1270,13 +1324,6 @@ func (l *localAPI) Close() {
l.srv.Close()
}
// Reset clears any pending notify value and broadcasts on the cond var so
// that any goroutine blocked waiting for a notification re-checks its
// condition and observes the cleared state.
func (l *localAPI) Reset() {
	l.Lock()
	defer l.Unlock()
	l.notify = nil
	// Broadcast while still holding the lock so waiters wake with a
	// consistent view of l.notify.
	l.cond.Broadcast()
}
func (l *localAPI) Notify(n *ipn.Notify) {
if n == nil {
return
@ -1368,13 +1415,8 @@ func (k *kubeServer) SetPatching(canPatch bool) {
k.canPatch = canPatch
}
// Reset discards all stored secret data, returning the fake kube server
// to its freshly-started state.
func (k *kubeServer) Reset() {
	k.Lock()
	k.secret = make(map[string]string)
	k.Unlock()
}
func (k *kubeServer) Start(t *testing.T) {
k.secret = map[string]string{}
root := filepath.Join(k.FSRoot, "var/run/secrets/kubernetes.io/serviceaccount")
if err := os.MkdirAll(root, 0700); err != nil {

View File

@ -35,6 +35,9 @@ import (
// tailscaleTunInterface is the interface name passed around for the
// tailscale TUN device.
const tailscaleTunInterface = "tailscale0"

// testSleepDuration is set at link time via
// go build -ldflags "-X main.testSleepDuration=<duration>" to shorten the
// egress proxy's resync sleep intervals and speed up tests. When empty or
// unparseable as a time.Duration, the production sleep durations are used.
var testSleepDuration string
// This file contains functionality to run containerboot as a proxy that can
// route cluster traffic to one or more tailnet targets, based on portmapping
// rules read from a configfile. Currently (9/2024) this is only used for the
@ -149,8 +152,13 @@ func (ep *egressProxy) configure(opts egressProxyRunOpts) {
ep.podIPv4 = opts.podIPv4
ep.tailnetAddrs = opts.tailnetAddrs
ep.client = &http.Client{} // default HTTP client
ep.shortSleep = time.Second
ep.longSleep = time.Second * 10
sleepDuration := time.Second
if d, err := time.ParseDuration(testSleepDuration); err == nil && d > 0 {
log.Printf("using test sleep duration %v", d)
sleepDuration = d
}
ep.shortSleep = sleepDuration
ep.longSleep = sleepDuration * 10
}
// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if

View File

@ -38,11 +38,11 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro
}
log.Printf("Starting tailscaled")
if err := cmd.Start(); err != nil {
return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err)
return nil, nil, fmt.Errorf("starting tailscaled failed: %w", err)
}
// Wait for the socket file to appear, otherwise API ops will racily fail.
log.Printf("Waiting for tailscaled socket")
log.Printf("Waiting for tailscaled socket at %s", cfg.Socket)
for {
if ctx.Err() != nil {
return nil, nil, errors.New("timed out waiting for tailscaled socket")