cmd/containerboot: wait for consistent state on shutdown (#14263)
tailscaled's ipn package writes a collection of keys to state after authenticating to control, but one at a time. If containerboot happens to send a SIGTERM signal to tailscaled in the middle of writing those keys, it may shut down with an inconsistent state Secret and never recover. While we can't durably fix this with our current single-use auth keys (no atomic operation to auth + write state), we can reduce the window for this race condition by checking for partial state before sending SIGTERM to tailscaled. Best effort only.

Updates #14080

Change-Id: I0532d51b6f0b7d391e538468bd6a0a80dbe1d9f7
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
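For context only (not part of this commit): a minimal sketch of how containerboot's shutdown path could bound the wait for consistent state before signalling tailscaled, assuming it lives in the same package as the diff below so it can reach kubeClient and waitForConsistentState. The shutdownTailscaled name, the 15-second timeout, and the *os.Process handle are illustrative assumptions, not the actual containerboot wiring.

// Hypothetical wiring, not part of this commit: a separate file in the same
// package could bound the wait and then signal tailscaled. The helper name,
// timeout, and *os.Process handle are assumptions for this sketch.
package main

import (
	"context"
	"log"
	"os"
	"syscall"
	"time"
)

func shutdownTailscaled(ctx context.Context, kc *kubeClient, tailscaled *os.Process) {
	// Bound the wait so shutdown cannot hang if state never becomes consistent.
	waitCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	if err := kc.waitForConsistentState(waitCtx); err != nil {
		log.Printf("state may still be partially written: %v", err)
	}
	// Best effort only: send SIGTERM regardless of the wait's outcome.
	if err := tailscaled.Signal(syscall.SIGTERM); err != nil {
		log.Printf("error sending SIGTERM to tailscaled: %v", err)
	}
}

Checking before signalling narrows the race window but does not eliminate it, matching the "best effort only" framing above.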
@@ -8,15 +8,22 @@ package main
 import (
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"log"
 	"net/http"
 	"net/netip"
 	"os"
 	"strings"
 	"time"

 	"tailscale.com/ipn"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 	"tailscale.com/kube/kubetypes"
 	"tailscale.com/logtail/backoff"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/logger"
 )

 // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use
@@ -126,3 +133,62 @@ func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error {
 	}
 	return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
 }

 // waitForConsistentState waits for tailscaled to finish writing state if it
 // looks like it's started. It is designed to reduce the likelihood that
 // tailscaled gets shut down in the window between authenticating to control
 // and finishing writing state. However, it's not bullet proof because we can't
 // atomically authenticate and write state.
 func (kc *kubeClient) waitForConsistentState(ctx context.Context) error {
 	var logged bool

 	bo := backoff.NewBackoff("", logger.Discard, 2*time.Second)
 	for {
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
 		default:
 		}
 		secret, err := kc.GetSecret(ctx, kc.stateSecret)
 		if ctx.Err() != nil || kubeclient.IsNotFoundErr(err) {
 			return nil
 		}
 		if err != nil {
 			return fmt.Errorf("getting Secret %q: %v", kc.stateSecret, err)
 		}

 		if hasConsistentState(secret.Data) {
 			return nil
 		}

 		if !logged {
 			log.Printf("Waiting for tailscaled to finish writing state to Secret %q", kc.stateSecret)
 			logged = true
 		}
 		bo.BackOff(ctx, errors.New("")) // Fake error to trigger actual sleep.
 	}
 }

 // hasConsistentState returns true if there is either no state or the full set
 // of expected keys is present.
 func hasConsistentState(d map[string][]byte) bool {
 	var (
 		_, hasCurrent = d[string(ipn.CurrentProfileStateKey)]
 		_, hasKnown   = d[string(ipn.KnownProfilesStateKey)]
 		_, hasMachine = d[string(ipn.MachineKeyStateKey)]
 		hasProfile    bool
 	)

 	for k := range d {
 		if strings.HasPrefix(k, "profile-") {
 			if hasProfile {
 				return false // We only expect one profile.
 			}
 			hasProfile = true
 		}
 	}

 	// Approximate check; we don't want to reimplement all of profileManager.
 	return (hasCurrent && hasKnown && hasMachine && hasProfile) ||
 		(!hasCurrent && !hasKnown && !hasMachine && !hasProfile)
 }
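For illustration only (not part of this commit), a minimal sketch of the three cases hasConsistentState distinguishes, assuming it sits in the same package as the diff above; the profile ID "profile-abcd" and the byte values are placeholders.

// exampleHasConsistentState is a hypothetical demo, not part of the commit.
// It relies on imports already present in this file ("fmt", "tailscale.com/ipn").
func exampleHasConsistentState() {
	empty := map[string][]byte{} // nothing written yet
	partial := map[string][]byte{
		string(ipn.MachineKeyStateKey): []byte("..."), // caught mid-write
	}
	full := map[string][]byte{
		string(ipn.CurrentProfileStateKey): []byte("profile-abcd"), // placeholder profile ID
		string(ipn.KnownProfilesStateKey):  []byte("{...}"),
		string(ipn.MachineKeyStateKey):     []byte("..."),
		"profile-abcd":                     []byte("{...}"),
	}
	// No state at all and a full set of keys are both consistent; a partial write is not.
	fmt.Println(hasConsistentState(empty), hasConsistentState(partial), hasConsistentState(full)) // true false true
}

Because only one profile is expected, a Secret carrying a second profile-* key is also reported as inconsistent.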