cmd/{containerboot,k8s-operator}: use state Secret for checking device auth (#16328)
Previously, the operator checked the ProxyGroup status fields for information on how many of the proxies had successfully authed. Use their state Secrets instead as a more reliable source of truth.

containerboot has written device_fqdn and device_ips keys to the state Secret since inception, and pod_uid since 1.78.0, so there's no need to use the API for that data. Read it from the state Secret for consistency. However, to ensure we don't read data from a previous run of containerboot, make sure we reset containerboot's state keys on startup.

One other knock-on effect of that is ProxyGroups can briefly be marked not Ready while a Pod is restarting. Introduce a new ProxyGroupAvailable condition to more accurately reflect when downstream controllers can implement flows that rely on a ProxyGroup having at least 1 proxy Pod running.

Fixes #16327

Change-Id: I026c18e9d23e87109a471a87b8e4fb6271716a66
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
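To make the intent of the new condition concrete, here is a minimal sketch (not code from this change) of how a downstream controller can gate its work on ProxyGroupAvailable, which only needs one running proxy Pod, instead of ProxyGroupReady, which needs all of them. It uses the stock apimachinery condition helpers; the status struct and function names here are illustrative placeholders.

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// proxyGroupStatus stands in for the operator's ProxyGroup status; only the
// Conditions field matters for this sketch.
type proxyGroupStatus struct {
	Conditions []metav1.Condition
}

// canConfigureProxies reports whether a downstream flow may proceed.
// ProxyGroupAvailable becomes true as soon as at least one proxy Pod is
// running, so a restarting replica no longer blocks dependent controllers,
// whereas ProxyGroupReady still means every replica is running.
func canConfigureProxies(status proxyGroupStatus) bool {
	return meta.IsStatusConditionTrue(status.Conditions, "ProxyGroupAvailable")
}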
This commit is contained in: parent f81baa2d56, commit 711698f5a9.
@@ -18,12 +18,15 @@ import (
 	"time"
 
 	"tailscale.com/ipn"
+	"tailscale.com/kube/egressservices"
+	"tailscale.com/kube/ingressservices"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 	"tailscale.com/kube/kubetypes"
 	"tailscale.com/logtail/backoff"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/logger"
+	"tailscale.com/util/set"
 )
 
 // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use
@@ -117,20 +120,39 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error {
 	return nil
 }
 
-// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod in the tailscale
-// state Secret.
-// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container.
-func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error {
-	capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion)
-	d := map[string][]byte{
-		kubetypes.KeyCapVer: []byte(capVerS),
+// resetContainerbootState resets state from previous runs of containerboot to
+// ensure the operator doesn't use stale state when a Pod is first recreated.
+func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error {
+	existingSecret, err := kc.GetSecret(ctx, kc.stateSecret)
+	if err != nil {
+		return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err)
+	}
+
+	s := &kubeapi.Secret{
+		Data: map[string][]byte{
+			kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion),
+		},
 	}
 	if podUID != "" {
-		d[kubetypes.KeyPodUID] = []byte(podUID)
+		s.Data[kubetypes.KeyPodUID] = []byte(podUID)
 	}
-	s := &kubeapi.Secret{
-		Data: d,
+
+	toClear := set.SetOf([]string{
+		kubetypes.KeyDeviceID,
+		kubetypes.KeyDeviceFQDN,
+		kubetypes.KeyDeviceIPs,
+		kubetypes.KeyHTTPSEndpoint,
+		egressservices.KeyEgressServices,
+		ingressservices.IngressConfigKey,
+	})
+	for key := range existingSecret.Data {
+		if toClear.Contains(key) {
+			// It's fine to leave the key in place as a debugging breadcrumb,
+			// it should get a new value soon.
+			s.Data[key] = nil
+		}
 	}
+
 	return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
 }
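The hunk above leans on Kubernetes merge-patch semantics: a key whose value is null in the patch is removed from the Secret's data, while keys with values are created or overwritten. The standalone Go sketch below only illustrates that payload shape; it is not code from this commit (the real call goes through containerboot's internal kube client via StrategicMergePatchSecret above), and the capability-version value shown is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of the body effectively sent when resetting containerboot's keys:
	// nil values delete the corresponding keys from the Secret, non-nil values
	// are written. Key names match the kubetypes constants used in the diff.
	patch := map[string]any{
		"data": map[string]any{
			"tailscale_capver": []byte("123"), // refreshed on startup (placeholder value)
			"device_fqdn":      nil,           // cleared: may be stale from a previous Pod
			"device_ips":       nil,
			"device_id":        nil,
			"https_endpoint":   nil,
		},
	}
	b, _ := json.Marshal(patch) // []byte marshals to base64, as Secret data expects
	fmt.Println(string(b))
}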
@@ -8,13 +8,18 @@ package main
 import (
 	"context"
 	"errors"
+	"fmt"
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"tailscale.com/ipn"
+	"tailscale.com/kube/egressservices"
+	"tailscale.com/kube/ingressservices"
 	"tailscale.com/kube/kubeapi"
 	"tailscale.com/kube/kubeclient"
 	"tailscale.com/kube/kubetypes"
+	"tailscale.com/tailcfg"
 )
 
 func TestSetupKube(t *testing.T) {
@@ -238,3 +243,78 @@ func TestWaitForConsistentState(t *testing.T) {
 		t.Fatalf("expected nil, got %v", err)
 	}
 }
+
+func TestResetContainerbootState(t *testing.T) {
+	capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion)
+	for name, tc := range map[string]struct {
+		podUID   string
+		initial  map[string][]byte
+		expected map[string][]byte
+	}{
+		"empty_initial": {
+			podUID:  "1234",
+			initial: map[string][]byte{},
+			expected: map[string][]byte{
+				kubetypes.KeyCapVer: capver,
+				kubetypes.KeyPodUID: []byte("1234"),
+			},
+		},
+		"empty_initial_no_pod_uid": {
+			initial: map[string][]byte{},
+			expected: map[string][]byte{
+				kubetypes.KeyCapVer: capver,
+			},
+		},
+		"only_relevant_keys_updated": {
+			podUID: "1234",
+			initial: map[string][]byte{
+				kubetypes.KeyCapVer:              []byte("1"),
+				kubetypes.KeyPodUID:              []byte("5678"),
+				kubetypes.KeyDeviceID:            []byte("device-id"),
+				kubetypes.KeyDeviceFQDN:          []byte("device-fqdn"),
+				kubetypes.KeyDeviceIPs:           []byte(`["192.0.2.1"]`),
+				kubetypes.KeyHTTPSEndpoint:       []byte("https://example.com"),
+				egressservices.KeyEgressServices: []byte("egress-services"),
+				ingressservices.IngressConfigKey: []byte("ingress-config"),
+				"_current-profile":               []byte("current-profile"),
+				"_machinekey":                    []byte("machine-key"),
+				"_profiles":                      []byte("profiles"),
+				"_serve_e0ce":                    []byte("serve-e0ce"),
+				"profile-e0ce":                   []byte("profile-e0ce"),
+			},
+			expected: map[string][]byte{
+				kubetypes.KeyCapVer: capver,
+				kubetypes.KeyPodUID: []byte("1234"),
+				// Cleared keys.
+				kubetypes.KeyDeviceID:            nil,
+				kubetypes.KeyDeviceFQDN:          nil,
+				kubetypes.KeyDeviceIPs:           nil,
+				kubetypes.KeyHTTPSEndpoint:       nil,
+				egressservices.KeyEgressServices: nil,
+				ingressservices.IngressConfigKey: nil,
+				// Tailscaled keys not included in patch.
+			},
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			var actual map[string][]byte
+			kc := &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
+				GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
+					return &kubeapi.Secret{
+						Data: tc.initial,
+					}, nil
+				},
+				StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error {
+					actual = secret.Data
+					return nil
+				},
+			}}
+			if err := kc.resetContainerbootState(context.Background(), tc.podUID); err != nil {
+				t.Fatalf("resetContainerbootState() error = %v", err)
+			}
+			if diff := cmp.Diff(tc.expected, actual); diff != "" {
+				t.Errorf("resetContainerbootState() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
@@ -188,6 +188,14 @@ func run() error {
 		if err := cfg.setupKube(bootCtx, kc); err != nil {
 			return fmt.Errorf("error setting up for running on Kubernetes: %w", err)
 		}
+		// Clear out any state from previous runs of containerboot. Check
+		// hasKubeStateStore because although we know we're in kube, that
+		// doesn't guarantee the state store is properly configured.
+		if hasKubeStateStore(cfg) {
+			if err := kc.resetContainerbootState(bootCtx, cfg.PodUID); err != nil {
+				return fmt.Errorf("error clearing previous state from Secret: %w", err)
+			}
+		}
 	}
 
 	client, daemonProcess, err := startTailscaled(bootCtx, cfg)
@@ -367,11 +375,6 @@ authLoop:
 			if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil {
 				return fmt.Errorf("failed to unset serve config: %w", err)
 			}
-			if hasKubeStateStore(cfg) {
-				if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil {
-					return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err)
-				}
-			}
 		}
 
 		if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) {
@@ -384,12 +387,6 @@ authLoop:
 			}
 		}
 
-		if hasKubeStateStore(cfg) {
-			if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil {
-				return fmt.Errorf("storing capability version and UID: %w", err)
-			}
-		}
-
 		w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
 		if err != nil {
 			return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err)
@@ -460,6 +460,7 @@ func TestContainerBoot(t *testing.T) {
 			Env: map[string]string{
 				"KUBERNETES_SERVICE_HOST":       env.kube.Host,
 				"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port,
+				"POD_UID":                       "some-pod-uid",
 			},
 			KubeSecret: map[string]string{
 				"authkey": "tskey-key",
@@ -472,6 +473,8 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
+					kubetypes.KeyPodUID: "some-pod-uid",
 				},
 			},
 			{
@@ -481,7 +484,8 @@ func TestContainerBoot(t *testing.T) {
 					"device_fqdn":       "test-node.test.ts.net",
 					"device_id":         "myID",
 					"device_ips":        `["100.64.0.1"]`,
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
+					kubetypes.KeyPodUID: "some-pod-uid",
 				},
 			},
 		},
@@ -555,6 +559,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -566,6 +571,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -577,7 +583,7 @@ func TestContainerBoot(t *testing.T) {
 					"device_fqdn":       "test-node.test.ts.net",
 					"device_id":         "myID",
 					"device_ips":        `["100.64.0.1"]`,
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 		},
@@ -600,6 +606,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -609,7 +616,7 @@ func TestContainerBoot(t *testing.T) {
 					"device_fqdn":       "test-node.test.ts.net",
 					"device_id":         "myID",
 					"device_ips":        `["100.64.0.1"]`,
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -628,7 +635,7 @@ func TestContainerBoot(t *testing.T) {
 					"device_fqdn":       "new-name.test.ts.net",
 					"device_id":         "newID",
 					"device_ips":        `["100.64.0.1"]`,
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 		},
@@ -913,6 +920,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -923,7 +931,7 @@ func TestContainerBoot(t *testing.T) {
 					"device_id":         "myID",
 					"device_ips":        `["100.64.0.1"]`,
 					"https_endpoint":    "no-https",
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 		},
@@ -948,6 +956,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 				EndpointStatuses: map[string]int{
 					egressSvcTerminateURL(env.localAddrPort): 200,
@@ -956,12 +965,12 @@ func TestContainerBoot(t *testing.T) {
 			{
 				Notify: runningNotify,
 				WantKubeSecret: map[string]string{
-					"egress-services":  mustBase64(t, egressStatus),
+					"egress-services":  string(mustJSON(t, egressStatus)),
 					"authkey":          "tskey-key",
 					"device_fqdn":      "test-node.test.ts.net",
 					"device_id":        "myID",
 					"device_ips":       `["100.64.0.1"]`,
-					"tailscale_capver":  capver,
+					kubetypes.KeyCapVer: capver,
 				},
 				EndpointStatuses: map[string]int{
 					egressSvcTerminateURL(env.localAddrPort): 200,
@@ -1003,6 +1012,7 @@ func TestContainerBoot(t *testing.T) {
 				},
 				WantKubeSecret: map[string]string{
 					"authkey":           "tskey-key",
+					kubetypes.KeyCapVer: capver,
 				},
 			},
 			{
@@ -1020,6 +1030,7 @@ func TestContainerBoot(t *testing.T) {
 					"_machinekey":       "foo",
 					"_profiles":         "foo",
 					"profile-baff":      "foo",
+					kubetypes.KeyCapVer: capver,
 				},
 				WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"",
 			},
@@ -1034,6 +1045,7 @@ func TestContainerBoot(t *testing.T) {
 					"_profiles":         "foo",
 					"profile-baff":      "foo",
 					"_current-profile":  "foo",
+					kubetypes.KeyCapVer: capver,
 				},
 				WantLog:      "HTTP server at [::]:9002 closed",
 				WantExitCode: ptr.To(0),
@@ -1061,7 +1073,7 @@ func TestContainerBoot(t *testing.T) {
 				fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path),
 				fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath),
 				fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d),
-				fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"),
+				"TS_TEST_FAKE_NETFILTER=true",
 			}
 			for k, v := range tc.Env {
 				cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
@@ -1489,10 +1501,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
 	}
 	switch r.Header.Get("Content-Type") {
 	case "application/json-patch+json":
-		req := []struct {
-			Op   string `json:"op"`
-			Path string `json:"path"`
-		}{}
+		req := []kubeclient.JSONPatch{}
 		if err := json.Unmarshal(bs, &req); err != nil {
 			panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
 		}
@@ -1503,23 +1512,20 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
 				panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
 			}
 			delete(k.secret, strings.TrimPrefix(op.Path, "/data/"))
-		case "replace":
+		case "add", "replace":
 			path, ok := strings.CutPrefix(op.Path, "/data/")
 			if !ok {
 				panic(fmt.Sprintf("unsupported json-patch path %q", op.Path))
 			}
-			req := make([]kubeclient.JSONPatch, 0)
-			if err := json.Unmarshal(bs, &req); err != nil {
-				panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
-			}
-
-			for _, patch := range req {
-				val, ok := patch.Value.(string)
+			val, ok := op.Value.(string)
 			if !ok {
-				panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value))
+				panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", op.Value))
 			}
-			k.secret[path] = val
+			v, err := base64.StdEncoding.DecodeString(val)
+			if err != nil {
+				panic(fmt.Sprintf("json patch value %q is not base64 encoded: %v", val, err))
+			}
+			k.secret[path] = string(v)
 		default:
 			panic(fmt.Sprintf("unsupported json-patch op %q", op.Op))
 		}
@@ -102,7 +102,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 		msg = err.Error()
 		return res, err
 	}
-	if !tsoperator.ProxyGroupIsReady(pg) {
+	if !tsoperator.ProxyGroupAvailable(pg) {
 		l.Infof("ProxyGroup for Service is not ready, waiting...")
 		reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
 		st = metav1.ConditionFalse
@@ -137,7 +137,7 @@ func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replic
 }
 
 func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) {
-	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l)
+	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l)
 }
 
 func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) {
@@ -531,7 +531,7 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 		return false, nil
 	}
-	if !tsoperator.ProxyGroupIsReady(pg) {
+	if !tsoperator.ProxyGroupAvailable(pg) {
 		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 	}
@@ -182,7 +182,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
 		}
 		return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err)
 	}
-	if !tsoperator.ProxyGroupIsReady(pg) {
+	if !tsoperator.ProxyGroupAvailable(pg) {
 		logger.Infof("ProxyGroup is not (yet) ready")
 		return false, nil
 	}
@@ -666,7 +666,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki
 	}
 
 	// Validate TLS configuration
-	if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) {
+	if len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) {
 		errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS))
 	}
 
@@ -683,7 +683,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki
 	}
 
 	// Validate ProxyGroup readiness
-	if !tsoperator.ProxyGroupIsReady(pg) {
+	if !tsoperator.ProxyGroupAvailable(pg) {
 		errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name))
 	}
 
@@ -305,7 +305,7 @@ func TestValidateIngress(t *testing.T) {
 			Status: tsapi.ProxyGroupStatus{
 				Conditions: []metav1.Condition{
 					{
-						Type:               string(tsapi.ProxyGroupReady),
+						Type:               string(tsapi.ProxyGroupAvailable),
 						Status:             metav1.ConditionTrue,
 						ObservedGeneration: 1,
 					},
@@ -399,7 +399,7 @@ func TestValidateIngress(t *testing.T) {
 			Status: tsapi.ProxyGroupStatus{
 				Conditions: []metav1.Condition{
 					{
-						Type:               string(tsapi.ProxyGroupReady),
+						Type:               string(tsapi.ProxyGroupAvailable),
 						Status:             metav1.ConditionFalse,
 						ObservedGeneration: 1,
 					},
@@ -755,7 +755,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec
 			Labels: pgSecretLabels(pgName, "config"),
 		},
 		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)),
+			tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)),
 		},
 	})
 }
@@ -794,13 +794,13 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) {
 			Labels: pgSecretLabels(pgName, "config"),
 		},
 		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): []byte("{}"),
+			tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"),
 		},
 	}
 	mustCreate(t, fc, pgCfgSecret)
 	pg.Status.Conditions = []metav1.Condition{
 		{
-			Type:               string(tsapi.ProxyGroupReady),
+			Type:               string(tsapi.ProxyGroupAvailable),
 			Status:             metav1.ConditionTrue,
 			ObservedGeneration: 1,
 		},
@@ -52,6 +52,17 @@ const (
 	// Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c
 	optimisticLockErrorMsg   = "the object has been modified; please apply your changes to the latest version and try again"
 	staticEndpointsMaxAddrs  = 2
+
+	// The minimum tailcfg.CapabilityVersion that deployed clients are expected
+	// to support to be compatible with the current ProxyGroup controller.
+	// If the controller needs to depend on newer client behaviour, it should
+	// maintain backwards compatible logic for older capability versions for 3
+	// stable releases, as per documentation on supported version drift:
+	// https://tailscale.com/kb/1236/kubernetes-operator#supported-versions
+	//
+	// tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was
+	// first introduced.
+	pgMinCapabilityVersion = 106
 )
 
 var (
@@ -204,14 +215,27 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	}
 
 	desiredReplicas := int(pgReplicas(pg))
-	if len(pg.Status.Devices) < desiredReplicas {
+
+	// Set ProxyGroupAvailable condition.
+	status := metav1.ConditionFalse
+	reason := reasonProxyGroupCreating
 	message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas)
+	if len(pg.Status.Devices) > 0 {
+		status = metav1.ConditionTrue
+		if len(pg.Status.Devices) == desiredReplicas {
+			reason = reasonProxyGroupReady
+		}
+	}
+	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, pg.Generation, r.clock, logger)
+
+	// Set ProxyGroupReady condition.
+	if len(pg.Status.Devices) < desiredReplicas {
 		logger.Debug(message)
 		return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message)
 	}
 
 	if len(pg.Status.Devices) > desiredReplicas {
-		message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas)
+		message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas)
 		logger.Debug(message)
 		return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message)
 	}
@@ -524,17 +548,13 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg
 		if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil {
 			return err
 		}
-		if err := r.Delete(ctx, m.stateSecret); err != nil {
-			if !apierrors.IsNotFound(err) {
-				return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err)
-			}
+		if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) {
+			return fmt.Errorf("error deleting state Secret %q: %w", m.stateSecret.Name, err)
 		}
 		configSecret := m.stateSecret.DeepCopy()
 		configSecret.Name += "-config"
-		if err := r.Delete(ctx, configSecret); err != nil {
-			if !apierrors.IsNotFound(err) {
-				return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err)
-			}
+		if err := r.Delete(ctx, configSecret); err != nil && !apierrors.IsNotFound(err) {
+			return fmt.Errorf("error deleting config Secret %q: %w", configSecret.Name, err)
 		}
 		// NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough
 		svc := &corev1.Service{
@@ -635,17 +655,38 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
 			return nil, err
 		}
 
-		var authKey string
+		var authKey *string
 		if existingCfgSecret == nil {
 			logger.Debugf("Creating authkey for new ProxyGroup proxy")
 			tags := pg.Spec.Tags.Stringify()
 			if len(tags) == 0 {
 				tags = r.defaultTags
 			}
-			authKey, err = newAuthKey(ctx, r.tsClient, tags)
+			key, err := newAuthKey(ctx, r.tsClient, tags)
 			if err != nil {
 				return nil, err
 			}
+			authKey = &key
+		}
+
+		if authKey == nil {
+			// Get state Secret to check if it's already authed.
+			stateSecret := &corev1.Secret{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      pgStateSecretName(pg.Name, i),
+					Namespace: r.tsNamespace,
+				},
+			}
+			if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) {
+				return nil, err
+			}
+
+			if shouldRetainAuthKey(stateSecret) && existingCfgSecret != nil {
+				authKey, err = authKeyFromSecret(existingCfgSecret)
+				if err != nil {
+					return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err)
+				}
+			}
 		}
 
 		replicaName := pgNodePortServiceName(pg.Name, i)
@@ -661,7 +702,14 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
 			}
 		}
 
-		configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret, endpoints[replicaName])
+		// AdvertiseServices config is set by ingress-pg-reconciler, so make sure we
+		// don't overwrite it if already set.
+		existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret)
+		if err != nil {
+			return nil, err
+		}
+
+		configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices)
 		if err != nil {
 			return nil, fmt.Errorf("error creating tailscaled config: %w", err)
 		}
@@ -811,20 +859,22 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro
 	gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len()))
 }
 
-func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret, staticEndpoints []netip.AddrPort) (tailscaledConfigs, error) {
+func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) {
 	conf := &ipn.ConfigVAlpha{
 		Version:      "alpha0",
 		AcceptDNS:    "false",
 		AcceptRoutes: "false", // AcceptRoutes defaults to true
 		Locked:       "false",
 		Hostname:     ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)),
+		AdvertiseServices: oldAdvertiseServices,
+		AuthKey:           authKey,
 	}
 
 	if pg.Spec.HostnamePrefix != "" {
 		conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx))
 	}
 
-	if shouldAcceptRoutes(class) {
+	if shouldAcceptRoutes(pc) {
 		conf.AcceptRoutes = "true"
 	}
 
@@ -832,51 +882,26 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32
 		conf.StaticEndpoints = staticEndpoints
 	}
 
-	deviceAuthed := false
-	for _, d := range pg.Status.Devices {
-		if d.Hostname == *conf.Hostname {
-			deviceAuthed = true
-			break
-		}
-	}
+	return map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha{
+		pgMinCapabilityVersion: *conf,
+	}, nil
+}
 
-	if authKey != "" {
-		conf.AuthKey = &authKey
-	} else if !deviceAuthed {
-		key, err := authKeyFromSecret(oldSecret)
-		if err != nil {
-			return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err)
-		}
-		conf.AuthKey = key
-	}
-	capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha)
-
-	// AdvertiseServices config is set by ingress-pg-reconciler, so make sure we
-	// don't overwrite it here.
-	if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil {
-		return nil, err
-	}
-	capVerConfigs[106] = *conf
-	return capVerConfigs, nil
-}
-
-func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error {
-	if oldSecret == nil {
-		return nil
+func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) {
+	if cfgSecret == nil {
+		return nil, nil
 	}
 
-	oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)]
-	if len(oldConfB) == 0 {
-		return nil
+	conf, err := latestConfigFromSecret(cfgSecret)
+	if err != nil {
+		return nil, err
 	}
 
-	var oldConf ipn.ConfigVAlpha
-	if err := json.Unmarshal(oldConfB, &oldConf); err != nil {
-		return fmt.Errorf("error unmarshalling existing config: %w", err)
-	}
-	conf.AdvertiseServices = oldConf.AdvertiseServices
+	if conf == nil {
+		return nil, nil
+	}
 
-	return nil
+	return conf.AdvertiseServices, nil
 }
 
 func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error {
@@ -914,7 +939,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr
 			dnsName: prefs.Config.UserProfile.LoginName,
 		}
 		pod := &corev1.Pod{}
-		if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) {
+		if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) {
 			return nil, err
 		} else if err == nil {
 			nm.podUID = string(pod.UID)
@@ -932,17 +957,23 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint
 	}
 
 	for _, m := range metadata {
-		device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret)
-		if err != nil {
-			return nil, err
-		}
-		if !ok {
+		if !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) {
+			// Current Pod has not yet written its UID to the state Secret, data may
+			// be stale.
 			continue
 		}
 
-		dev := tsapi.TailnetDevice{
-			Hostname:   device.Hostname,
-			TailnetIPs: device.TailnetIPs,
+		device := tsapi.TailnetDevice{}
+		if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 {
+			ips := []string{}
+			if err := json.Unmarshal(ipsB, &ips); err != nil {
+				return nil, fmt.Errorf("failed to extract device IPs from state Secret %q: %w", m.stateSecret.Name, err)
+			}
+			device.TailnetIPs = ips
+		}
+
+		if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok {
+			device.Hostname = hostname
 		}
 
 		if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 {
@@ -950,10 +981,10 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint
 			for _, e := range ep {
 				eps = append(eps, e.String())
 			}
-			dev.StaticEndpoints = eps
+			device.StaticEndpoints = eps
 		}
 
-		devices = append(devices, dev)
+		devices = append(devices, device)
 	}
 
 	return devices, nil
@@ -351,7 +351,7 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S
 	for i := range pgReplicas(pg) {
 		secrets = append(secrets, &corev1.Secret{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:            fmt.Sprintf("%s-%d", pg.Name, i),
+				Name:            pgStateSecretName(pg.Name, i),
 				Namespace:       namespace,
 				Labels:          pgSecretLabels(pg.Name, "state"),
 				OwnerReferences: pgOwnerReference(pg),
@@ -422,6 +422,10 @@ func pgConfigSecretName(pgName string, i int32) string {
 	return fmt.Sprintf("%s-%d-config", pgName, i)
 }
 
+func pgStateSecretName(pgName string, i int32) string {
+	return fmt.Sprintf("%s-%d", pgName, i)
+}
+
 func pgEgressCMName(pg string) string {
 	return fmt.Sprintf("%s-egress-config", pg)
 }
@@ -877,6 +877,7 @@ func TestProxyGroup(t *testing.T) {
 		expectReconciled(t, reconciler, "", pg.Name)
 
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar())
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar())
 		expectEqual(t, fc, pg)
 		expectProxyGroupResources(t, fc, pg, true, pc)
 		if expected := 1; reconciler.egressProxyGroups.Len() != expected {
@@ -913,6 +914,7 @@ func TestProxyGroup(t *testing.T) {
 			},
 		}
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar())
 		expectEqual(t, fc, pg)
 		expectProxyGroupResources(t, fc, pg, true, pc)
 	})
@@ -924,12 +926,14 @@ func TestProxyGroup(t *testing.T) {
 		})
 		expectReconciled(t, reconciler, "", pg.Name)
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar())
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar())
 		expectEqual(t, fc, pg)
 		expectProxyGroupResources(t, fc, pg, true, pc)
 
 		addNodeIDToStateSecrets(t, fc, pg)
 		expectReconciled(t, reconciler, "", pg.Name)
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar())
 		pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{
 			Hostname:   "hostname-nodeid-2",
 			TailnetIPs: []string{"1.2.3.4", "::1"},
@@ -947,6 +951,7 @@ func TestProxyGroup(t *testing.T) {
 		expectReconciled(t, reconciler, "", pg.Name)
 
 		pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device.
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar())
 		expectEqual(t, fc, pg)
 		expectProxyGroupResources(t, fc, pg, true, pc)
 	})
@@ -1224,7 +1229,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
 			Namespace: tsNamespace,
 		},
 		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): existingConfigBytes,
+			tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): existingConfigBytes,
 		},
 	})
 
@@ -1261,7 +1266,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
 			ResourceVersion: "2",
 		},
 		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): expectedConfigBytes,
+			tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): expectedConfigBytes,
 		},
 	})
 }
@@ -1423,6 +1428,11 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG
 		s.Data = map[string][]byte{
 			currentProfileKey: []byte(key),
 			key:               bytes,
+			kubetypes.KeyDeviceIPs:  []byte(`["1.2.3.4", "::1"]`),
+			kubetypes.KeyDeviceFQDN: []byte(fmt.Sprintf("hostname-nodeid-%d.tails-scales.ts.net", i)),
+			// TODO(tomhjp): We have two different mechanisms to retrieve device IDs.
+			// Consolidate on this one.
+			kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)),
 		}
 	})
 }
@@ -897,14 +897,6 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) {
 	}
 }
 
-func readAuthKey(secret *corev1.Secret, key string) (*string, error) {
-	origConf := &ipn.ConfigVAlpha{}
-	if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err != nil {
-		return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err)
-	}
-	return origConf.AuthKey, nil
-}
-
 // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy
 // state and auth key and returns tailscaled config files for currently supported proxy versions.
 func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) {
@@ -951,7 +943,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
 	return capVerConfigs, nil
 }
 
-func authKeyFromSecret(s *corev1.Secret) (key *string, err error) {
+// latestConfigFromSecret returns the ipn.ConfigVAlpha with the highest capver
+// as found in the Secret's key names, e.g. "cap-107.hujson" has capver 107.
+// If no config is found, it returns nil.
+func latestConfigFromSecret(s *corev1.Secret) (*ipn.ConfigVAlpha, error) {
 	latest := tailcfg.CapabilityVersion(-1)
 	latestStr := ""
 	for k, data := range s.Data {
@@ -968,12 +963,31 @@ func authKeyFromSecret(s *corev1.Secret) (key *string, err error) {
 			latest = v
 		}
 	}
 
+	var conf *ipn.ConfigVAlpha
+	if latestStr != "" {
+		conf = &ipn.ConfigVAlpha{}
+		if err := json.Unmarshal([]byte(s.Data[latestStr]), conf); err != nil {
+			return nil, fmt.Errorf("error unmarshaling tailscaled config from Secret %q in field %q: %w", s.Name, latestStr, err)
+		}
+	}
+
+	return conf, nil
+}
+
+func authKeyFromSecret(s *corev1.Secret) (key *string, err error) {
+	conf, err := latestConfigFromSecret(s)
+	if err != nil {
+		return nil, err
+	}
+
 	// Allow for configs that don't contain an auth key. Perhaps
 	// users have some mechanisms to delete them. Auth key is
 	// normally not needed after the initial login.
-	if latestStr != "" {
-		return readAuthKey(s, latestStr)
+	if conf != nil {
+		key = conf.AuthKey
 	}
 
 	return key, nil
 }
|
@ -164,7 +164,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin
|
||||
}
|
||||
return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err)
|
||||
}
|
||||
if !tsoperator.ProxyGroupIsReady(pg) {
|
||||
if !tsoperator.ProxyGroupAvailable(pg) {
|
||||
logger.Infof("ProxyGroup is not (yet) ready")
|
||||
return false, nil
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
|
||||
Labels: pgSecretLabels("test-pg", "config"),
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
tsoperator.TailscaledConfigFileName(106): []byte(`{"Version":""}`),
|
||||
tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(`{"Version":""}`),
|
||||
},
|
||||
}
|
||||
|
||||
@ -179,7 +179,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
|
||||
// Set ProxyGroup status to ready
|
||||
pg.Status.Conditions = []metav1.Condition{
|
||||
{
|
||||
Type: string(tsapi.ProxyGroupReady),
|
||||
Type: string(tsapi.ProxyGroupAvailable),
|
||||
Status: metav1.ConditionTrue,
|
||||
ObservedGeneration: 1,
|
||||
},
|
||||
|
@ -446,18 +446,15 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string)
|
||||
return tsapi.RecorderTailnetDevice{}, false, err
|
||||
}
|
||||
|
||||
return getDeviceInfo(ctx, r.tsClient, secret)
|
||||
}
|
||||
|
||||
func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) {
|
||||
prefs, ok, err := getDevicePrefs(secret)
|
||||
if !ok || err != nil {
|
||||
return tsapi.RecorderTailnetDevice{}, false, err
|
||||
}
|
||||
|
||||
// TODO(tomhjp): The profile info doesn't include addresses, which is why we
|
||||
// need the API. Should we instead update the profile to include addresses?
|
||||
device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil)
|
||||
// need the API. Should maybe update tsrecorder to write IPs to the state
|
||||
// Secret like containerboot does.
|
||||
device, err := r.tsClient.Device(ctx, string(prefs.Config.NodeID), nil)
|
||||
if err != nil {
|
||||
return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err)
|
||||
}
|
||||
|
@@ -207,7 +207,8 @@ type ConditionType string
 const (
 	ConnectorReady  ConditionType = `ConnectorReady`
 	ProxyClassReady ConditionType = `ProxyClassReady`
-	ProxyGroupReady ConditionType = `ProxyGroupReady`
+	ProxyGroupReady     ConditionType = `ProxyGroupReady`     // All proxy Pods running.
+	ProxyGroupAvailable ConditionType = `ProxyGroupAvailable` // At least one proxy Pod running.
 	ProxyReady      ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service
 	RecorderReady   ConditionType = `RecorderReady`
 	// EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed
@@ -137,8 +137,16 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool {
 }
 
 func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool {
+	return proxyGroupCondition(pg, tsapi.ProxyGroupReady)
+}
+
+func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool {
+	return proxyGroupCondition(pg, tsapi.ProxyGroupAvailable)
+}
+
+func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) bool {
 	idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool {
-		return cond.Type == string(tsapi.ProxyGroupReady)
+		return cond.Type == string(condType)
 	})
 	if idx == -1 {
 		return false
@@ -19,6 +19,7 @@ type FakeClient struct {
 	UpdateSecretImpl              func(context.Context, *kubeapi.Secret) error
 	JSONPatchResourceImpl         func(context.Context, string, string, []JSONPatch) error
 	ListSecretsImpl               func(context.Context, map[string]string) (*kubeapi.SecretList, error)
+	StrategicMergePatchSecretImpl func(context.Context, string, *kubeapi.Secret, string) error
 }
 
 func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) {
@@ -30,8 +31,8 @@ func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*kubeapi.Secr
 func (fc *FakeClient) SetURL(_ string) {}
 func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) {
 }
-func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error {
-	return nil
+func (fc *FakeClient) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error {
+	return fc.StrategicMergePatchSecretImpl(ctx, name, s, fieldManager)
 }
 func (fc *FakeClient) Event(context.Context, string, string, string) error {
 	return nil