Mirror of https://github.com/tailscale/tailscale.git (synced 2025-07-30 07:43:42 +00:00)

Commit fe3b3d5827 (parent 30a13ebbd8): Finish up the fix, automated test

Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
@@ -149,7 +149,7 @@ func (kc *kubeClient) waitForConsistentState(ctx context.Context) error {
default:
}
secret, err := kc.GetSecret(ctx, kc.stateSecret)
-if kubeclient.IsNotFoundErr(err) {
+if ctx.Err() != nil || kubeclient.IsNotFoundErr(err) {
return nil
}
if err != nil {
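The only change above is the extra ctx.Err() check: once the caller's context is cancelled or times out, the wait loop returns nil instead of continuing to poll (or misreporting the state Secret as missing). A minimal sketch of the pattern, with the check function and one-second interval as illustrative stand-ins rather than the real containerboot code:

// (imports: "context", "time")
// waitUntil polls check until it reports true or the context is done.
// Cancellation (shutdown or timeout) ends the wait quietly rather than
// being treated as a failure.
func waitUntil(ctx context.Context, check func() bool) error {
	ticker := time.NewTicker(time.Second) // interval is illustrative
	defer ticker.Stop()
	for {
		if ctx.Err() != nil || check() {
			return nil
		}
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
		}
	}
}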
@@ -137,51 +137,62 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) {
}

func main() {
+if err := mainErr(); err != nil && !errors.Is(err, context.Canceled) {
+log.Fatal(err)
+}
+}

+func mainErr() error {
log.SetPrefix("boot: ")
tailscale.I_Acknowledge_This_API_Is_Unstable = true

cfg, err := configFromEnv()
if err != nil {
-log.Fatalf("invalid configuration: %v", err)
+return fmt.Errorf("invalid configuration: %w", err)
}

if !cfg.UserspaceMode {
if err := ensureTunFile(cfg.Root); err != nil {
-log.Fatalf("Unable to create tuntap device file: %v", err)
+return fmt.Errorf("unable to create tuntap device file: %w", err)
}
if cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.Routes != nil || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" {
if err := ensureIPForwarding(cfg.Root, cfg.ProxyTargetIP, cfg.TailnetTargetIP, cfg.TailnetTargetFQDN, cfg.Routes); err != nil {
log.Printf("Failed to enable IP forwarding: %v", err)
log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.")
if cfg.InKubernetes {
-log.Fatalf("You can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
+return fmt.Errorf("you can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
} else {
-log.Fatalf("You can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
+return fmt.Errorf("you can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
}
}
}
}

-// Context is used for all setup stuff until we're in steady
+// Root context for the whole containerboot process, used to make sure
+// shutdown signals are promptly and cleanly handled.
+ctx, cancel := contextWithExitSignalWatch()
+defer cancel()

+// bootCtx is used for all setup stuff until we're in steady
// state, so that if something is hanging we eventually time out
// and crashloop the container.
-bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()

var kc *kubeClient
if cfg.InKubernetes {
kc, err = newKubeClient(cfg.Root, cfg.KubeSecret)
if err != nil {
-log.Fatalf("error initializing kube client: %v", err)
+return fmt.Errorf("error initializing kube client: %w", err)
}
if err := cfg.setupKube(bootCtx, kc); err != nil {
-log.Fatalf("error setting up for running on Kubernetes: %v", err)
+return fmt.Errorf("error setting up for running on Kubernetes: %w", err)
}
}

client, daemonProcess, err := startTailscaled(bootCtx, cfg)
if err != nil {
-log.Fatalf("failed to bring up tailscale: %v", err)
+return fmt.Errorf("failed to bring up tailscale: %w", err)
}
killTailscaled := func() {
if hasKubeStateStore(cfg) {
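Most of this hunk is mechanical: main becomes a thin wrapper and the body moves to mainErr, which returns errors instead of calling log.Fatalf. That way deferred cleanup still runs on failure, and a context.Canceled caused by the shutdown signal watcher exits cleanly instead of logging a fatal error. A sketch of the wrapper shape in isolation (run, setup and cleanup are placeholders, not containerboot functions):

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

func main() {
	// Only genuinely unexpected errors are fatal; cancellation from a
	// shutdown signal is a normal exit.
	if err := run(); err != nil && !errors.Is(err, context.Canceled) {
		log.Fatal(err)
	}
}

func run() error {
	defer cleanup() // runs even on error, unlike log.Fatalf
	if err := setup(); err != nil {
		return fmt.Errorf("setup: %w", err)
	}
	return nil
}

// setup and cleanup stand in for the real work.
func setup() error { return nil }
func cleanup()     {}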
@@ -196,6 +207,7 @@ func main() {
ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
defer cancel()

log.Printf("Checking for consistent state")
err := kc.waitForConsistentState(ctx)
if err != nil {
log.Printf("Error waiting for consistent state on shutdown: %v", err)
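This is the shutdown half of the fix: before the termination signal is forwarded to tailscaled, containerboot waits, under its own 25-second timeout context, for the state Secret to look complete, and only logs if that wait fails. A rough sketch of the shape, assuming a wait function like the one earlier (imports: "context", "log", "time"); the helper name is illustrative:

// Bound the shutdown wait so a stuck state write can never block signal
// propagation forever.
func shutdownAfterConsistentState(wait func(context.Context) error, stopDaemon func()) {
	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second)
	defer cancel()
	if err := wait(ctx); err != nil {
		log.Printf("Error waiting for consistent state on shutdown: %v", err)
	}
	stopDaemon() // forward the signal to tailscaled regardless
}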
@@ -244,7 +256,7 @@ func main() {

w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState)
if err != nil {
-log.Fatalf("failed to watch tailscaled for updates: %v", err)
+return fmt.Errorf("failed to watch tailscaled for updates: %w", err)
}

// Now that we've started tailscaled, we can symlink the socket to the
@@ -280,18 +292,18 @@ func main() {
didLogin = true
w.Close()
if err := tailscaleUp(bootCtx, cfg); err != nil {
-return fmt.Errorf("failed to auth tailscale: %v", err)
+return fmt.Errorf("failed to auth tailscale: %w", err)
}
w, err = client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil {
-return fmt.Errorf("rewatching tailscaled for updates after auth: %v", err)
+return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
}
return nil
}

if isTwoStepConfigAlwaysAuth(cfg) {
if err := authTailscale(); err != nil {
-log.Fatalf("failed to auth tailscale: %v", err)
+return fmt.Errorf("failed to auth tailscale: %w", err)
}
}
@@ -299,7 +311,7 @@ authLoop:
for {
n, err := w.Next()
if err != nil {
-log.Fatalf("failed to read from tailscaled: %v", err)
+return fmt.Errorf("failed to read from tailscaled: %w", err)
}

if n.State != nil {
@@ -308,10 +320,10 @@ authLoop:
if isOneStepConfig(cfg) {
// This could happen if this is the first time tailscaled was run for this
// device and the auth key was not passed via the configfile.
-log.Fatalf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.")
+return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.")
}
if err := authTailscale(); err != nil {
-log.Fatalf("failed to auth tailscale: %v", err)
+return fmt.Errorf("failed to auth tailscale: %w", err)
}
case ipn.NeedsMachineAuth:
log.Printf("machine authorization required, please visit the admin panel")
@@ -331,14 +343,11 @@ authLoop:

w.Close()

-ctx, cancel := contextWithExitSignalWatch()
-defer cancel()

if isTwoStepConfigAuthOnce(cfg) {
// Now that we are authenticated, we can set/reset any of the
// settings that we need to.
if err := tailscaleSet(ctx, cfg); err != nil {
-log.Fatalf("failed to auth tailscale: %v", err)
+return fmt.Errorf("failed to auth tailscale: %w", err)
}
}
@@ -347,10 +356,10 @@ authLoop:
if cfg.ServeConfigPath != "" {
log.Printf("serve proxy: unsetting previous config")
if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil {
-log.Fatalf("failed to unset serve config: %v", err)
+return fmt.Errorf("failed to unset serve config: %w", err)
}
if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil {
-log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err)
+return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err)
}
}
@@ -360,19 +369,19 @@ authLoop:
// wipe it, but it's good hygiene.
log.Printf("Deleting authkey from kube secret")
if err := kc.deleteAuthKey(ctx); err != nil {
-log.Fatalf("deleting authkey from kube secret: %v", err)
+return fmt.Errorf("deleting authkey from kube secret: %w", err)
}
}

if hasKubeStateStore(cfg) {
if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil {
-log.Fatalf("storing capability version and UID: %v", err)
+return fmt.Errorf("storing capability version and UID: %w", err)
}
}

w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil {
-log.Fatalf("rewatching tailscaled for updates after auth: %v", err)
+return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
}

var (
@@ -396,7 +405,7 @@ authLoop:
if isL3Proxy(cfg) {
nfr, err = newNetfilterRunner(log.Printf)
if err != nil {
-log.Fatalf("error creating new netfilter runner: %v", err)
+return fmt.Errorf("error creating new netfilter runner: %w", err)
}
}
@@ -467,7 +476,7 @@ runLoop:
killTailscaled()
break runLoop
case err := <-errChan:
-log.Fatalf("failed to read from tailscaled: %v", err)
+return fmt.Errorf("failed to read from tailscaled: %w", err)
case n := <-notifyChan:
if n.State != nil && *n.State != ipn.Running {
// Something's gone wrong and we've left the authenticated state.
@@ -475,7 +484,7 @@ runLoop:
// control flow required to make it work now is hard. So, just crash
// the container and rely on the container runtime to restart us,
// whereupon we'll go through initial auth again.
-log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
+return fmt.Errorf("tailscaled left running state (now in state %q), exiting", *n.State)
}
if n.NetMap != nil {
addrs = n.NetMap.SelfNode.Addresses().AsSlice()
@@ -493,7 +502,7 @@ runLoop:
deviceID := n.NetMap.SelfNode.StableID()
if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceID, &deviceID) {
if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil {
-log.Fatalf("storing device ID in Kubernetes Secret: %v", err)
+return fmt.Errorf("storing device ID in Kubernetes Secret: %w", err)
}
}
if cfg.TailnetTargetFQDN != "" {
@@ -530,12 +539,12 @@ runLoop:
rulesInstalled = true
log.Printf("Installing forwarding rules for destination %v", ea.String())
if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
-log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
+return fmt.Errorf("installing egress proxy rules for destination %s: %v", ea.String(), err)
}
}
}
if !rulesInstalled {
-log.Fatalf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT())
+return fmt.Errorf("no forwarding rules for egress addresses %v, host supports IPv6: %v", egressAddrs, nfr.HasIPV6NAT())
}
}
currentEgressIPs = newCurentEgressIPs
@@ -543,7 +552,7 @@ runLoop:
if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
log.Printf("Installing proxy rules")
if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
-log.Fatalf("installing ingress proxy rules: %v", err)
+return fmt.Errorf("installing ingress proxy rules: %w", err)
}
}
if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
@@ -559,7 +568,7 @@ runLoop:
if backendsHaveChanged {
log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
-log.Fatalf("error installing ingress proxy rules: %v", err)
+return fmt.Errorf("error installing ingress proxy rules: %w", err)
}
}
resetTimer(false)
@@ -581,7 +590,7 @@ runLoop:
if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
-log.Fatalf("installing egress proxy rules: %v", err)
+return fmt.Errorf("installing egress proxy rules: %w", err)
}
}
// If this is a L7 cluster ingress proxy (set up
@@ -593,7 +602,7 @@ runLoop:
if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
-log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
+return fmt.Errorf("installing rules to forward traffic to node's tailnet IP: %w", err)
}
}
currentIPs = newCurrentIPs
@@ -612,7 +621,7 @@ runLoop:
deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()}
if hasKubeStateStore(cfg) && deephash.Update(&currentDeviceEndpoints, &deviceEndpoints) {
if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
-log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err)
+return fmt.Errorf("storing device IPs and FQDN in Kubernetes Secret: %w", err)
}
}
@@ -702,16 +711,18 @@ runLoop:
if backendsHaveChanged && len(addrs) != 0 {
log.Printf("Backend address change detected, installing proxy rules for backends %v", newBackendAddrs)
if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
-log.Fatalf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err)
+return fmt.Errorf("installing ingress proxy rules for DNS target %s: %v", cfg.ProxyTargetDNSName, err)
}
}
backendAddrs = newBackendAddrs
resetTimer(false)
case e := <-egressSvcsErrorChan:
-log.Fatalf("egress proxy failed: %v", e)
+return fmt.Errorf("egress proxy failed: %v", e)
}
}
+wg.Wait()

+return nil
}

// ensureTunFile checks that /dev/net/tun exists, creating it if
@@ -740,13 +751,13 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
ip4s, err := net.DefaultResolver.LookupIP(ctx, "ip4", name)
if err != nil {
if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
-return nil, fmt.Errorf("error looking up IPv4 addresses: %v", err)
+return nil, fmt.Errorf("error looking up IPv4 addresses: %w", err)
}
}
ip6s, err := net.DefaultResolver.LookupIP(ctx, "ip6", name)
if err != nil {
if e, ok := err.(*net.DNSError); !(ok && e.IsNotFound) {
-return nil, fmt.Errorf("error looking up IPv6 addresses: %v", err)
+return nil, fmt.Errorf("error looking up IPv6 addresses: %w", err)
}
}
if len(ip4s) == 0 && len(ip6s) == 0 {
@@ -759,7 +770,7 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) {
// context that gets cancelled when a signal is received and a cancel function
// that can be called to free the resources when the watch should be stopped.
func contextWithExitSignalWatch() (context.Context, func()) {
-closeChan := make(chan string)
+closeChan := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
@@ -771,8 +782,11 @@ func contextWithExitSignalWatch() (context.Context, func()) {
return
}
}()
+closeOnce := sync.Once{}
f := func() {
-closeChan <- "goodbye"
+closeOnce.Do(func() {
+close(closeChan)
+})
}
return ctx, f
}
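Previously the returned cancel function sent a string into closeChan, which blocks if the watcher goroutine has already exited and misbehaves if called more than once; closing a chan struct{} under sync.Once makes cancellation idempotent. The pattern on its own, as a runnable sketch (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

func newStopper() (stop func(), done <-chan struct{}) {
	ch := make(chan struct{})
	var once sync.Once
	return func() {
		once.Do(func() { close(ch) }) // idempotent: safe to call repeatedly
	}, ch
}

func main() {
	stop, done := newStopper()
	stop()
	stop() // second call is a no-op rather than a panic on double close
	<-done
	fmt.Println("stopped")
}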
@@ -7,11 +7,6 @@ package main

import (
"bytes"
-"crypto/rand"
-"crypto/rsa"
-"crypto/tls"
-"crypto/x509"
-"crypto/x509/pkix"
_ "embed"
"encoding/base64"
"encoding/json"
@@ -20,7 +15,6 @@ import (
"fmt"
"io"
"io/fs"
-"math/big"
"net"
"net/http"
"net/http/httptest"
@@ -31,6 +25,7 @@ import (
"strconv"
"strings"
"sync"
+"syscall"
"testing"
"time"
@@ -134,15 +129,29 @@ func TestContainerBoot(t *testing.T) {

// WantCmds is the commands that containerboot should run in this phase.
WantCmds []string

// WantKubeSecret is the secret keys/values that should exist in the
// kube secret.
WantKubeSecret map[string]string

+// Update the kube secret with these keys/values at the beginning of the
+// phase (simulates our fake tailscaled doing it).
+UpdateKubeSecret map[string]string

// WantFiles files that should exist in the container and their
// contents.
WantFiles map[string]string
-// WantFatalLog is the fatal log message we expect from containerboot.
-// If set for a phase, the test will finish on that phase.
-WantFatalLog string

+// WantLog is a log message we expect from containerboot.
+WantLog string

+// If set for a phase, the test will expect containerboot to exit with
+// this error code, and the test will finish on that phase without
+// waiting for the successful startup log message.
+WantExitCode *int

+// The signal to send to containerboot at the start of the phase.
+Signal *syscall.Signal

EndpointStatuses map[string]int
}
@@ -430,7 +439,8 @@ func TestContainerBoot(t *testing.T) {
},
},
},
-WantFatalLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false",
+WantLog: "no forwarding rules for egress addresses [::1/128], host supports IPv6: false",
+WantExitCode: ptr.To(1),
},
},
},
@@ -833,6 +843,60 @@ func TestContainerBoot(t *testing.T) {
},
},
},
+{
+Name: "kube_shutdown_during_state_write",
+Env: map[string]string{
+"KUBERNETES_SERVICE_HOST": kube.Host,
+"KUBERNETES_SERVICE_PORT_HTTPS": kube.Port,
+},
+KubeSecret: map[string]string{
+"authkey": "tskey-key",
+},
+Phases: []phase{
+{
+// Normal startup.
+WantCmds: []string{
+"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking",
+"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key",
+},
+WantKubeSecret: map[string]string{
+"authkey": "tskey-key",
+},
+},
+{
+// SIGTERM before state is finished writing, should wait for
+// consistent state before propagating SIGTERM to tailscaled.
+Signal: ptr.To(unix.SIGTERM),
+UpdateKubeSecret: map[string]string{
+"_machinekey": "foo",
+"_profiles": "foo",
+"profile-baff": "foo",
+// Missing "_current-profile" key.
+},
+WantKubeSecret: map[string]string{
+"authkey": "tskey-key",
+"_machinekey": "foo",
+"_profiles": "foo",
+"profile-baff": "foo",
+},
+WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"",
+},
+{
+// tailscaled has finished writing state, should propagate SIGTERM.
+UpdateKubeSecret: map[string]string{
+"_current-profile": "foo",
+},
+WantKubeSecret: map[string]string{
+"authkey": "tskey-key",
+"_machinekey": "foo",
+"_profiles": "foo",
+"profile-baff": "foo",
+"_current-profile": "foo",
+},
+WantExitCode: ptr.To(0),
+},
+},
+},
}

for _, test := range tests {
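The new kube_shutdown_during_state_write case drives the fix end to end: the fake Secret is filled with everything except the _current-profile key, SIGTERM is sent, and containerboot is expected to log that it is still waiting; only once _current-profile appears does it propagate the signal and exit 0. Judging by these fixtures, a consistency check over the Secret keys looks roughly like the following hypothetical helper (not the real kube.go code):

// stateLooksConsistent reports whether the tailscaled state Secret appears
// to contain a complete write: machine key, profile list, at least one
// profile body, and the pointer to the current profile. Key names mirror
// the test fixtures above. (import "strings")
func stateLooksConsistent(data map[string][]byte) bool {
	for _, required := range []string{"_machinekey", "_profiles", "_current-profile"} {
		if _, ok := data[required]; !ok {
			return false
		}
	}
	for k := range data {
		if strings.HasPrefix(k, "profile-") {
			return true // at least one profile body present
		}
	}
	return false
}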
@@ -877,26 +941,36 @@ func TestContainerBoot(t *testing.T) {

var wantCmds []string
for i, p := range test.Phases {
+for k, v := range p.UpdateKubeSecret {
+kube.SetSecret(k, v)
+}
lapi.Notify(p.Notify)
-if p.WantFatalLog != "" {
+if p.Signal != nil {
+cmd.Process.Signal(*p.Signal)
+}
+if p.WantLog != "" {
err := tstest.WaitFor(2*time.Second, func() error {
-state, err := cmd.Process.Wait()
-if err != nil {
-return err
-}
-if state.ExitCode() != 1 {
-return fmt.Errorf("process exited with code %d but wanted %d", state.ExitCode(), 1)
-}
-waitLogLine(t, time.Second, cbOut, p.WantFatalLog)
+waitLogLine(t, time.Second, cbOut, p.WantLog)
return nil
})
if err != nil {
t.Fatal(err)
}
}

+if p.WantExitCode != nil {
+state, err := cmd.Process.Wait()
+if err != nil {
+t.Fatal(err)
+}
+if state.ExitCode() != *p.WantExitCode {
+t.Fatalf("phase %d: want exit code %d, got %d", i, *p.WantExitCode, state.ExitCode())
+}

// Early test return, we don't expect the successful startup log message.
return
}

wantCmds = append(wantCmds, p.WantCmds...)
waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n"))
err := tstest.WaitFor(2*time.Second, func() error {
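Phases can now start by mutating the fake Secret (UpdateKubeSecret), send a signal, wait for an arbitrary log line (WantLog), and assert an explicit exit code (WantExitCode) instead of the old hard-coded WantFatalLog/exit-1 pair. A generic sketch of the signal-then-assert step such a harness performs; names and structure are illustrative, the real test drives this through the phase table:

package main

import (
	"os"
	"syscall"
	"testing"
)

// terminateAndExpectCleanExit sends SIGTERM and asserts a zero exit code,
// mirroring the Signal/WantExitCode phase mechanics.
func terminateAndExpectCleanExit(t *testing.T, p *os.Process) {
	t.Helper()
	if err := p.Signal(syscall.SIGTERM); err != nil {
		t.Fatal(err)
	}
	state, err := p.Wait() // blocks until the process exits
	if err != nil {
		t.Fatal(err)
	}
	if code := state.ExitCode(); code != 0 {
		t.Fatalf("exit code %d, want 0", code)
	}
}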
@@ -952,6 +1026,9 @@ func TestContainerBoot(t *testing.T) {
}
}
waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal")
+if cmd.ProcessState != nil {
+t.Fatalf("containerboot should be running but exited with exit code %d", cmd.ProcessState.ExitCode())
+}
})
}
}
@@ -1197,7 +1274,7 @@ func (k *kubeServer) Start(t *testing.T, ip string) {
t.Fatal(err)
}

-k.srv = httptestServer(t, ip, k)
+k.srv = httptest.NewTLSServer(k)
k.Host = k.srv.Listener.Addr().(*net.TCPAddr).IP.String()
k.Port = strconv.Itoa(k.srv.Listener.Addr().(*net.TCPAddr).Port)
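With the interactive fake-API test gone (see the removals further down), the test server no longer needs to listen on a routable Docker IP with a hand-rolled certificate; httptest.NewTLSServer picks a loopback port and serves a generated self-signed cert automatically, and its Client() returns an http.Client that already trusts it. A minimal usage sketch of that standard-library API:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	defer srv.Close()

	// srv.Client() trusts the server's self-signed certificate.
	resp, err := srv.Client().Get(srv.URL)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body)) // "ok"
}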
@@ -1219,8 +1296,6 @@ func (k *kubeServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
panic("client didn't provide bearer token in request")
}
switch r.URL.Path {
-case "/api/v1/namespaces/default/secrets":
-k.serveCreateSecret(w, r)
case "/api/v1/namespaces/default/secrets/tailscale":
k.serveSecret(w, r)
case "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews":
@@ -1252,40 +1327,6 @@ func (k *kubeServer) serveSSAR(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, `{"status":{"allowed":%v}}`, ok)
}

-func (k *kubeServer) serveCreateSecret(w http.ResponseWriter, r *http.Request) {
-if r.Method != "PUT" {
-panic(fmt.Sprintf("unhandled HTTP request %s %s", r.Method, r.URL))
-}

-bs, err := io.ReadAll(r.Body)
-if err != nil {
-http.Error(w, fmt.Sprintf("reading request body: %v", err), http.StatusInternalServerError)
-return
-}
-defer r.Body.Close()

-w.Header().Set("Content-Type", "application/json")
-k.Lock()
-defer k.Unlock()
-payload := map[string]any{}
-if err := json.Unmarshal(bs, &payload); err != nil {
-panic("unmarshal failed")
-}
-data := payload["data"].(map[string]any)
-k.secret = map[string]string{}
-for key, value := range data {
-v, err := base64.StdEncoding.DecodeString(value.(string))
-if err != nil {
-panic("base64 decode failed")
-}
-k.secret[key] = string(v)
-}
-// The real API echoes back the secret with additional fields set.
-if _, err := w.Write(bs); err != nil {
-panic("write failed")
-}
-}

func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
bs, err := io.ReadAll(r.Body)
if err != nil {
@@ -1325,13 +1366,16 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
}
for _, op := range req {
-if op.Op != "remove" {
+switch op.Op {
+case "remove":
+if !strings.HasPrefix(op.Path, "/data/") {
+panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
+}
+delete(k.secret, strings.TrimPrefix(op.Path, "/data/"))
+default:
panic(fmt.Sprintf("unsupported json-patch op %q", op.Op))
}
-if !strings.HasPrefix(op.Path, "/data/") {
-panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
-}
-delete(k.secret, strings.TrimPrefix(op.Path, "/data/"))
}
case "application/strategic-merge-patch+json":
req := struct {
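The patch handler is restructured as a switch so further JSON Patch operations can be added later; for now only "remove" under /data/ is accepted, and anything else panics loudly in the fake server. For reference, the kind of request body this path handles decodes into a struct shaped roughly like the one below (field names follow RFC 6902; the concrete struct used by the test may differ):

// A JSON Patch body that removes the auth key would look like:
//
//	[{"op":"remove","path":"/data/authkey"}]
type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value any    `json:"value,omitempty"`
}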
@@ -1350,75 +1394,3 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
panic(fmt.Sprintf("unhandled HTTP request %s %s", r.Method, r.URL))
}
}

-func httptestServer(t *testing.T, ip string, handler http.Handler) *httptest.Server {
-ln, err := net.Listen("tcp", fmt.Sprintf("%s:0", ip))
-if err != nil {
-t.Fatal(err)
-}
-srv := &httptest.Server{
-Listener: ln,
-Config: &http.Server{Handler: handler},
-}

-// Generate a TLS certificate valid for ip.
-priv, err := rsa.GenerateKey(rand.Reader, 2048)
-if err != nil {
-t.Fatal(err)
-}
-template := x509.Certificate{
-IPAddresses: []net.IP{net.ParseIP(ip)},
-SerialNumber: big.NewInt(time.Now().Unix()),
-Subject: pkix.Name{
-Organization: []string{"Acme Co"},
-},
-NotBefore: time.Now(),
-NotAfter: time.Now().Add(24 * time.Hour),

-KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign,
-ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-BasicConstraintsValid: true,
-IsCA: true,
-}
-derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
-if err != nil {
-t.Fatal(err)
-}

-srv.TLS = &tls.Config{
-Certificates: []tls.Certificate{{
-Certificate: [][]byte{derBytes},
-PrivateKey: priv,
-}},
-}
-srv.StartTLS()

-return srv
-}

-// TestFakeKubernetesAPIServer aims to make it trivially easy to run interactive
-// tests against containerboot within a container but without a real k8s cluster.
-// The container just needs enough kube API to store its state in a "Secret".
-//
-// export TS_AUTHKEY=ts...
-// DOCKER_IP=$(docker network inspect bridge --format '{{(index .IPAM.Config 0).Gateway}}')
-// TAGS=test PLATFORM=local REPO=tailscale make publishdevimage
-// FAKE_KUBERNETES_API_IP=${DOCKER_IP} go test -timeout 0 -v -run TestFakeKubernetesAPIServer ./cmd/containerboot/
-// <Run the docker command printed to the terminal>
-func TestFakeKubernetesAPIServer(t *testing.T) {
-ip := os.Getenv("FAKE_KUBERNETES_API_IP")
-if ip == "" {
-t.Skip("not a real test, set FAKE_KUBERNETES_API_IP to run")
-}

-d := t.TempDir()
-kube := &kubeServer{FSRoot: d}
-kube.Start(t, ip)
-defer kube.Close()

-t.Logf("Fake Kubernetes API server running at https://%s:%s", kube.Host, kube.Port)
-t.Logf("Read secret:\ncurl --silent --insecure -H \"Authorization: Bearer bearer_token\" https://%s:%s/api/v1/namespaces/default/secrets/tailscale | jq", kube.Host, kube.Port)
-t.Logf("Run the client:\ndocker run --rm -e TS_USERSPACE=false -e TS_AUTHKEY -e TS_AUTH_ONCE=true --device /dev/net/tun --cap-add NET_ADMIN --cap-add NET_RAW -e KUBERNETES_SERVICE_HOST=%s -e KUBERNETES_SERVICE_PORT_HTTPS=%s -v %s/var/run/secrets:/var/run/secrets -e TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV=true tailscale:test", kube.Host, kube.Port, d)

-<-make(chan struct{})
-}