cmd/k8s-operator: allow specifying replicas for connectors
This commit adds a `replicas` field to the `Connector` custom resource, letting users specify how many replicas the operator should create for a Connector's proxy StatefulSet. This makes it possible to run exit nodes, subnet routers, and app connectors in a highly available fashion.

Signed-off-by: David Bond <davidsbond93@gmail.com>
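For illustration, a Connector requesting two replicas could be constructed like this in Go. This is a minimal sketch based on the fields this change touches; the "Connector" kind name and the embedded TypeMeta/ObjectMeta are assumed to follow the usual Kubernetes conventions.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/types/ptr"
)

func main() {
	// A Connector that advertises a subnet route and asks for two
	// proxy replicas. Replicas is a *int32; when left nil the
	// operator falls back to a single replica.
	cn := &tsapi.Connector{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Connector", // assumed kind name
			APIVersion: "tailscale.com/v1alpha1",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "prod-subnet-router"},
		Spec: tsapi.ConnectorSpec{
			Replicas: ptr.To[int32](2),
			SubnetRouter: &tsapi.SubnetRouter{
				AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"},
			},
		},
	}
	fmt.Println(cn.Name, *cn.Spec.Replicas)
}
```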
@@ -25,7 +25,6 @@ import (
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
@@ -188,7 +187,13 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
}
}

var replicas int32 = 1
if cn.Spec.Replicas != nil {
replicas = *cn.Spec.Replicas
}

sts := &tailscaleSTSConfig{
Replicas: replicas,
ParentResourceName: cn.Name,
ParentResourceUID: string(cn.UID),
Hostname: hostname,
@@ -219,16 +224,19 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
} else {
a.exitNodes.Remove(cn.UID)
}

if cn.Spec.SubnetRouter != nil {
a.subnetRouters.Add(cn.GetUID())
} else {
a.subnetRouters.Remove(cn.GetUID())
}

if cn.Spec.AppConnector != nil {
a.appConnectors.Add(cn.GetUID())
} else {
a.appConnectors.Remove(cn.GetUID())
}

a.mu.Unlock()
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
@@ -244,21 +252,28 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
return err
}

dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
devices, err := a.ssr.DeviceInfo(ctx, crl, logger)
if err != nil {
return err
}

if dev == nil || dev.hostname == "" {
logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth")
// No hostname yet. Wait for the connector pod to auth.
cn.Status.TailnetIPs = nil
cn.Status.Hostname = ""
return nil
hostnames := make([]string, 0)
cn.Status.TailnetIPs = make([]string, 0)

for _, dev := range devices {
if dev == nil || dev.hostname == "" {
continue
}

for _, ip := range dev.ips {
cn.Status.TailnetIPs = append(cn.Status.TailnetIPs, ip)
}

// TODO(davidsbond): we likely don't want to override this every time.
hostnames = append(hostnames, dev.hostname)
}

cn.Status.TailnetIPs = dev.ips
cn.Status.Hostname = dev.hostname
cn.Status.Hostname = strings.Join(hostnames, ",")

return nil
}
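To summarise the status handling in the hunk above: the reconciler now walks every replica's device rather than a single one, collects all tailnet IPs, and joins the hostnames with commas. Below is a condensed, hypothetical sketch of that aggregation; the unexported device type is re-declared here purely for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// device mirrors the fields the reconciler reads from each replica's
// state Secret (assumed shape, for illustration only).
type device struct {
	hostname string
	ips      []string
}

// connectorStatus aggregates per-replica device info the way the hunk
// above does: every replica's tailnet IPs are collected, and the
// hostnames are joined into one comma-separated string.
func connectorStatus(devices []*device) (ips []string, hostname string) {
	hostnames := make([]string, 0, len(devices))
	for _, dev := range devices {
		if dev == nil || dev.hostname == "" {
			continue // this replica has not finished auth yet
		}
		ips = append(ips, dev.ips...)
		hostnames = append(hostnames, dev.hostname)
	}
	return ips, strings.Join(hostnames, ",")
}

func main() {
	ips, host := connectorStatus([]*device{
		{hostname: "connector-0", ips: []string{"100.64.0.1"}},
		{hostname: "connector-1", ips: []string{"100.64.0.2"}},
	})
	fmt.Println(ips, host) // [100.64.0.1 100.64.0.2] connector-0,connector-1
}
```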
|
@@ -20,6 +20,7 @@ import (
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
@@ -36,6 +37,7 @@ func TestConnector(t *testing.T) {
|
||||
APIVersion: "tailscale.com/v1alpha1",
|
||||
},
|
||||
Spec: tsapi.ConnectorSpec{
|
||||
Replicas: ptr.To[int32](1),
|
||||
SubnetRouter: &tsapi.SubnetRouter{
|
||||
AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"},
|
||||
},
|
||||
@@ -55,7 +57,8 @@ func TestConnector(t *testing.T) {
|
||||
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cr := &ConnectorReconciler{
|
||||
Client: fc,
|
||||
Client: fc,
|
||||
recorder: record.NewFakeRecorder(10),
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
@@ -78,6 +81,7 @@ func TestConnector(t *testing.T) {
|
||||
isExitNode: true,
|
||||
subnetRoutes: "10.40.0.0/14",
|
||||
app: kubetypes.AppConnector,
|
||||
replicas: cn.Spec.Replicas,
|
||||
}
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
@@ -156,6 +160,7 @@ func TestConnector(t *testing.T) {
|
||||
APIVersion: "tailscale.io/v1alpha1",
|
||||
},
|
||||
Spec: tsapi.ConnectorSpec{
|
||||
Replicas: ptr.To[int32](1),
|
||||
SubnetRouter: &tsapi.SubnetRouter{
|
||||
AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"},
|
||||
},
|
||||
@@ -174,6 +179,7 @@ func TestConnector(t *testing.T) {
|
||||
subnetRoutes: "10.40.0.0/14",
|
||||
hostname: "test-connector",
|
||||
app: kubetypes.AppConnector,
|
||||
replicas: cn.Spec.Replicas,
|
||||
}
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
@@ -217,6 +223,7 @@ func TestConnectorWithProxyClass(t *testing.T) {
|
||||
APIVersion: "tailscale.io/v1alpha1",
|
||||
},
|
||||
Spec: tsapi.ConnectorSpec{
|
||||
Replicas: ptr.To[int32](1),
|
||||
SubnetRouter: &tsapi.SubnetRouter{
|
||||
AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"},
|
||||
},
|
||||
@@ -260,6 +267,7 @@ func TestConnectorWithProxyClass(t *testing.T) {
|
||||
isExitNode: true,
|
||||
subnetRoutes: "10.40.0.0/14",
|
||||
app: kubetypes.AppConnector,
|
||||
replicas: cn.Spec.Replicas,
|
||||
}
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
@@ -311,6 +319,7 @@ func TestConnectorWithAppConnector(t *testing.T) {
|
||||
APIVersion: "tailscale.io/v1alpha1",
|
||||
},
|
||||
Spec: tsapi.ConnectorSpec{
|
||||
Replicas: ptr.To[int32](1),
|
||||
AppConnector: &tsapi.AppConnector{},
|
||||
},
|
||||
}
|
||||
@@ -350,6 +359,7 @@ func TestConnectorWithAppConnector(t *testing.T) {
|
||||
hostname: "test-connector",
|
||||
app: kubetypes.AppConnector,
|
||||
isAppConnector: true,
|
||||
replicas: cn.Spec.Replicas,
|
||||
}
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
|
@@ -125,6 +125,13 @@ spec:
resources created for this Connector. If unset, the operator will
create resources with the default configuration.
type: string
replicas:
description: |-
Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 1.
type: integer
format: int32
minimum: 0
subnetRouter:
description: |-
SubnetRouter defines subnet routes that the Connector device should

@@ -150,6 +150,13 @@ spec:
resources created for this Connector. If unset, the operator will
create resources with the default configuration.
type: string
replicas:
description: |-
Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 1.
format: int32
minimum: 0
type: integer
subnetRouter:
description: |-
SubnetRouter defines subnet routes that the Connector device should
|
@@ -212,6 +212,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
hostname := hostnameForIngress(ing)
|
||||
|
||||
sts := &tailscaleSTSConfig{
|
||||
Replicas: 1,
|
||||
Hostname: hostname,
|
||||
ParentResourceName: ing.Name,
|
||||
ParentResourceUID: string(ing.UID),
|
||||
@@ -227,27 +228,25 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
sts.ForwardClusterTrafficViaL7IngressProxy = true
|
||||
}
|
||||
|
||||
if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
|
||||
if _, err = a.ssr.Provision(ctx, logger, sts); err != nil {
|
||||
return fmt.Errorf("failed to provision: %w", err)
|
||||
}
|
||||
|
||||
dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
|
||||
devices, err := a.ssr.DeviceInfo(ctx, crl, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err)
|
||||
}
|
||||
if dev == nil || dev.ingressDNSName == "" {
|
||||
logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress")
|
||||
// No hostname yet. Wait for the proxy pod to auth.
|
||||
ing.Status.LoadBalancer.Ingress = nil
|
||||
if err := a.Status().Update(ctx, ing); err != nil {
|
||||
return fmt.Errorf("failed to update ingress status: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName)
|
||||
ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
|
||||
{
|
||||
ing.Status.LoadBalancer.Ingress = nil
|
||||
for _, dev := range devices {
|
||||
if dev == nil || dev.ingressDNSName == "" {
|
||||
logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress")
|
||||
ing.Status.LoadBalancer.Ingress = nil
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName)
|
||||
ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{
|
||||
Hostname: dev.ingressDNSName,
|
||||
Ports: []networkingv1.IngressPortStatus{
|
||||
{
|
||||
@@ -255,11 +254,13 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
if err := a.Status().Update(ctx, ing); err != nil {
|
||||
|
||||
if err = a.Status().Update(ctx, ing); err != nil {
|
||||
return fmt.Errorf("failed to update ingress status: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
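The Ingress reconciler above follows the same pattern, building one LoadBalancer status entry per replica device that is already serving. A simplified sketch of that loop follows; the device type is again a stand-in for the operator's internal struct.

```go
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

// device is an assumed stand-in for the operator's internal type; only
// the ingress DNS name matters for this sketch.
type device struct {
	ingressDNSName string
}

// ingressStatus builds one LoadBalancer entry per replica that already
// has an ingress DNS name, mirroring the loop in the hunk above.
func ingressStatus(devices []*device) []networkingv1.IngressLoadBalancerIngress {
	var entries []networkingv1.IngressLoadBalancerIngress
	for _, dev := range devices {
		if dev == nil || dev.ingressDNSName == "" {
			continue // this replica is not serving the Ingress yet
		}
		entries = append(entries, networkingv1.IngressLoadBalancerIngress{
			Hostname: dev.ingressDNSName,
			Ports: []networkingv1.IngressPortStatus{
				{Protocol: "TCP", Port: 443},
			},
		})
	}
	return entries
}

func main() {
	got := ingressStatus([]*device{{ingressDNSName: "web.tailnet.ts.net"}})
	fmt.Println(len(got), got[0].Hostname)
}
```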
@@ -57,6 +57,7 @@ func TestTailscaleIngress(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -766,7 +767,7 @@ func ingress() *networkingv1.Ingress {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
UID: "1234-UID",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: ptr.To("tailscale"),
|
||||
|
@@ -122,6 +122,7 @@ func TestLoadBalancerClass(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -260,6 +261,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -372,6 +374,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -623,6 +626,7 @@ func TestAnnotations(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -729,6 +733,7 @@ func TestAnnotationIntoLB(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -859,6 +864,7 @@ func TestLBIntoAnnotation(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -999,6 +1005,7 @@ func TestCustomHostname(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -1111,6 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -1359,6 +1367,7 @@ func TestProxyClassForService(t *testing.T) {
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -1454,6 +1463,7 @@ func TestDefaultLoadBalancer(t *testing.T) {
|
||||
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName, "svc"))
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -1509,6 +1519,7 @@ func TestProxyFirewallMode(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
o := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
@@ -1800,6 +1811,7 @@ func Test_authKeyRemoval(t *testing.T) {
|
||||
hostname: "default-test",
|
||||
clusterTargetIP: "10.20.30.40",
|
||||
app: kubetypes.AppIngressProxy,
|
||||
replicas: ptr.To[int32](1),
|
||||
}
|
||||
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
@@ -1867,6 +1879,7 @@ func Test_externalNameService(t *testing.T) {
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -114,6 +115,7 @@ var (
|
||||
)
|
||||
|
||||
type tailscaleSTSConfig struct {
|
||||
Replicas int32
|
||||
ParentResourceName string
|
||||
ParentResourceUID string
|
||||
ChildResourceLabels map[string]string
|
||||
@@ -205,11 +207,12 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga
|
||||
}
|
||||
sts.ProxyClass = proxyClass
|
||||
|
||||
secretName, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
|
||||
secretNames, err := a.createOrGetSecrets(ctx, logger, sts, hsvc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
|
||||
}
|
||||
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName)
|
||||
|
||||
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretNames)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
|
||||
}
|
||||
@@ -228,7 +231,7 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga
|
||||
// Cleanup removes all resources associated that were created by Provision with
|
||||
// the given labels. It returns true when all resources have been removed,
|
||||
// otherwise it returns false and the caller should retry later.
|
||||
func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) {
|
||||
func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (bool, error) {
|
||||
// Need to delete the StatefulSet first, and delete it with foreground
|
||||
// cascading deletion. That way, the pod that's writing to the Secret will
|
||||
// stop running before we start looking at the Secret's contents, and
|
||||
@@ -239,6 +242,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("getting statefulset: %w", err)
|
||||
}
|
||||
|
||||
if sts != nil {
|
||||
if !sts.GetDeletionTimestamp().IsZero() {
|
||||
// Deletion in progress, check again later. We'll get another
|
||||
@@ -246,29 +250,39 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare
|
||||
logger.Debugf("waiting for statefulset %s/%s deletion", sts.GetNamespace(), sts.GetName())
|
||||
return false, nil
|
||||
}
|
||||
err := a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, client.InNamespace(a.operatorNamespace), client.MatchingLabels(labels), client.PropagationPolicy(metav1.DeletePropagationForeground))
|
||||
if err != nil {
|
||||
|
||||
options := []client.DeleteAllOfOption{
|
||||
client.InNamespace(a.operatorNamespace),
|
||||
client.MatchingLabels(labels),
|
||||
client.PropagationPolicy(metav1.DeletePropagationForeground),
|
||||
}
|
||||
|
||||
if err = a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil {
|
||||
return false, fmt.Errorf("deleting statefulset: %w", err)
|
||||
}
|
||||
|
||||
logger.Debugf("started deletion of statefulset %s/%s", sts.GetNamespace(), sts.GetName())
|
||||
return false, nil
|
||||
}
|
||||
|
||||
dev, err := a.DeviceInfo(ctx, labels, logger)
|
||||
devices, err := a.DeviceInfo(ctx, labels, logger)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("getting device info: %w", err)
|
||||
}
|
||||
if dev != nil && dev.id != "" {
|
||||
logger.Debugf("deleting device %s from control", string(dev.id))
|
||||
if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil {
|
||||
errResp := &tailscale.ErrResponse{}
|
||||
if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
|
||||
logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id))
|
||||
|
||||
for _, dev := range devices {
|
||||
if dev != nil && dev.id != "" {
|
||||
logger.Debugf("deleting device %s from control", string(dev.id))
|
||||
if err = a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil {
|
||||
errResp := &tailscale.ErrResponse{}
|
||||
if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
|
||||
logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id))
|
||||
} else {
|
||||
return false, fmt.Errorf("deleting device: %w", err)
|
||||
}
|
||||
} else {
|
||||
return false, fmt.Errorf("deleting device: %w", err)
|
||||
logger.Debugf("device %s deleted from control", string(dev.id))
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("device %s deleted from control", string(dev.id))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -339,91 +353,102 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l
|
||||
return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec })
|
||||
}
|
||||
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName string, configs tailscaledConfigs, _ error) {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// Hardcode a -0 suffix so that in future, if we support
|
||||
// multiple StatefulSet replicas, we can provision -N for
|
||||
// those.
|
||||
Name: hsvc.Name + "-0",
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: stsC.ChildResourceLabels,
|
||||
},
|
||||
}
|
||||
var orig *corev1.Secret // unmodified copy of secret
|
||||
if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil {
|
||||
logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName())
|
||||
orig = secret.DeepCopy()
|
||||
} else if !apierrors.IsNotFound(err) {
|
||||
return "", nil, err
|
||||
}
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecrets(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) {
|
||||
secretNames := make([]string, stsC.Replicas)
|
||||
|
||||
var authKey string
|
||||
if orig == nil {
|
||||
// Initially it contains only tailscaled config, but when the
|
||||
// proxy starts, it will also store there the state, certs and
|
||||
// ACME account key.
|
||||
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels)
|
||||
for i := range stsC.Replicas {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: hsvc.Name + "-" + strconv.FormatInt(int64(i), 10),
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: stsC.ChildResourceLabels,
|
||||
},
|
||||
}
|
||||
|
||||
secretNames[i] = secret.Name
|
||||
|
||||
var orig *corev1.Secret // unmodified copy of secret
|
||||
if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil {
|
||||
logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName())
|
||||
orig = secret.DeepCopy()
|
||||
} else if !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var authKey string
|
||||
if orig == nil {
|
||||
// Initially it contains only tailscaled config, but when the
|
||||
// proxy starts, it will also store there the state, certs and
|
||||
// ACME account key.
|
||||
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sts != nil {
|
||||
// StatefulSet exists, so we have already created the secret.
|
||||
// If the secret is missing, they should delete the StatefulSet.
|
||||
logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName())
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Create API Key secret which is going to be used by the statefulset
|
||||
// to authenticate with Tailscale.
|
||||
logger.Debugf("creating authkey for new tailscale proxy")
|
||||
tags := stsC.Tags
|
||||
if len(tags) == 0 {
|
||||
tags = a.defaultTags
|
||||
}
|
||||
authKey, err = newAuthKey(ctx, a.tsClient, tags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
configs, err := tailscaledConfig(stsC, authKey, orig)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return nil, fmt.Errorf("error creating tailscaled config: %w", err)
|
||||
}
|
||||
if sts != nil {
|
||||
// StatefulSet exists, so we have already created the secret.
|
||||
// If the secret is missing, they should delete the StatefulSet.
|
||||
logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName())
|
||||
return "", nil, nil
|
||||
|
||||
latest := tailcfg.CapabilityVersion(-1)
|
||||
var latestConfig ipn.ConfigVAlpha
|
||||
for key, val := range configs {
|
||||
fn := tsoperator.TailscaledConfigFileName(key)
|
||||
b, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshalling tailscaled config: %w", err)
|
||||
}
|
||||
|
||||
mak.Set(&secret.StringData, fn, string(b))
|
||||
if key > latest {
|
||||
latest = key
|
||||
latestConfig = val
|
||||
}
|
||||
}
|
||||
// Create API Key secret which is going to be used by the statefulset
|
||||
// to authenticate with Tailscale.
|
||||
logger.Debugf("creating authkey for new tailscale proxy")
|
||||
tags := stsC.Tags
|
||||
if len(tags) == 0 {
|
||||
tags = a.defaultTags
|
||||
|
||||
if stsC.ServeConfig != nil {
|
||||
j, err := json.Marshal(stsC.ServeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mak.Set(&secret.StringData, "serve-config", string(j))
|
||||
}
|
||||
authKey, err = newAuthKey(ctx, a.tsClient, tags)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
configs, err := tailscaledConfig(stsC, authKey, orig)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("error creating tailscaled config: %w", err)
|
||||
}
|
||||
latest := tailcfg.CapabilityVersion(-1)
|
||||
var latestConfig ipn.ConfigVAlpha
|
||||
for key, val := range configs {
|
||||
fn := tsoperator.TailscaledConfigFileName(key)
|
||||
b, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err)
|
||||
}
|
||||
mak.Set(&secret.StringData, fn, string(b))
|
||||
if key > latest {
|
||||
latest = key
|
||||
latestConfig = val
|
||||
|
||||
if orig != nil {
|
||||
logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Create(ctx, secret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stsC.ServeConfig != nil {
|
||||
j, err := json.Marshal(stsC.ServeConfig)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
mak.Set(&secret.StringData, "serve-config", string(j))
|
||||
}
|
||||
|
||||
if orig != nil {
|
||||
logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Create(ctx, secret); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
return secret.Name, configs, nil
|
||||
return secretNames, nil
|
||||
}
|
||||
|
||||
// sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted
|
||||
@@ -443,22 +468,36 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string {
|
||||
// It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the
|
||||
// Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the
|
||||
// returned capver is -1. Either of device ID, hostname and IPs can be empty string if not found in the Secret.
|
||||
func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) {
|
||||
sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels)
|
||||
if err != nil {
|
||||
return dev, err
|
||||
func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) {
|
||||
var secrets corev1.SecretList
|
||||
if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(childLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sec == nil {
|
||||
return dev, nil
|
||||
|
||||
devices := make([]*device, 0)
|
||||
for _, sec := range secrets.Items {
|
||||
podUID := ""
|
||||
pod := new(corev1.Pod)
|
||||
err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod)
|
||||
switch {
|
||||
case apierrors.IsNotFound(err):
|
||||
// If the Pod is not found, we won't have its UID. We can still get the device information but the
|
||||
// capability version will be unknown.
|
||||
case err != nil:
|
||||
return nil, err
|
||||
default:
|
||||
podUID = string(pod.ObjectMeta.UID)
|
||||
}
|
||||
|
||||
info, err := deviceInfo(&sec, podUID, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
devices = append(devices, info)
|
||||
}
|
||||
podUID := ""
|
||||
pod := new(corev1.Pod)
|
||||
if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) {
|
||||
return dev, err
|
||||
} else if err == nil {
|
||||
podUID = string(pod.ObjectMeta.UID)
|
||||
}
|
||||
return deviceInfo(sec, podUID, logger)
|
||||
|
||||
return devices, nil
|
||||
}
|
||||
|
||||
// device contains tailscale state of a proxy device as gathered from its tailscale state Secret.
|
||||
@@ -534,7 +573,7 @@ var proxyYaml []byte
|
||||
//go:embed deploy/manifests/userspace-proxy.yaml
|
||||
var userspaceProxyYaml []byte
|
||||
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret string) (*appsv1.StatefulSet, error) {
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) {
|
||||
ss := new(appsv1.StatefulSet)
|
||||
if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
|
||||
if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
|
||||
@@ -573,18 +612,22 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod
|
||||
}
|
||||
|
||||
if sts.Replicas > 0 {
|
||||
ss.Spec.Replicas = ptr.To(sts.Replicas)
|
||||
}
|
||||
|
||||
// Generic containerboot configuration options.
|
||||
container.Env = append(container.Env,
|
||||
corev1.EnvVar{
|
||||
Name: "TS_KUBE_SECRET",
|
||||
Value: proxySecret,
|
||||
Value: "$(POD_NAME)",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
// New style is in the form of cap-<capability-version>.hujson.
|
||||
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR",
|
||||
Value: "/etc/tsconfig",
|
||||
Value: "/etc/tsconfig/$(POD_NAME)",
|
||||
},
|
||||
)
|
||||
|
||||
if sts.ForwardClusterTrafficViaL7IngressProxy {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS",
|
||||
@@ -592,20 +635,23 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
})
|
||||
}
|
||||
|
||||
configVolume := corev1.Volume{
|
||||
Name: "tailscaledconfig",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
for i, secret := range proxySecrets {
|
||||
configVolume := corev1.Volume{
|
||||
Name: "tailscaledconfig-" + strconv.Itoa(i),
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: secret,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: fmt.Sprintf("tailscaledconfig-%d", i),
|
||||
ReadOnly: true,
|
||||
MountPath: path.Join("/etc/tsconfig/", secret),
|
||||
})
|
||||
}
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "tailscaledconfig",
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tsconfig",
|
||||
})
|
||||
|
||||
if a.tsFirewallMode != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
@@ -643,22 +689,27 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
} else if sts.ServeConfig != nil {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_SERVE_CONFIG",
|
||||
Value: "/etc/tailscaled/serve-config",
|
||||
Value: "/etc/tailscaled/$(POD_NAME)-serve-config",
|
||||
})
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "serve-config",
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tailscaled",
|
||||
})
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
Name: "serve-config",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
|
||||
|
||||
for _, secret := range proxySecrets {
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "serve-config-" + secret,
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tailscaled",
|
||||
})
|
||||
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
Name: "serve-config-" + secret,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: "serve-config-" + secret,
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
app, err := appInfoForProxy(sts)
|
||||
|
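For the StatefulSet reconciler changes above: each replica now gets its own tailscaled config Secret, named after the headless Service plus the replica ordinal and mounted under /etc/tsconfig/<secret-name>, while each pod selects its own Secret via TS_KUBE_SECRET=$(POD_NAME). A small, hypothetical helper showing just the naming scheme (the service name used below is made up):

```go
package main

import (
	"fmt"
	"path"
	"strconv"
)

// secretNamesFor returns one Secret name per replica, following the
// <headless-service-name>-<ordinal> convention used in the hunk above,
// together with the directory each Secret is mounted at inside the pod.
func secretNamesFor(hsvcName string, replicas int32) (names, mounts []string) {
	for i := int32(0); i < replicas; i++ {
		name := hsvcName + "-" + strconv.FormatInt(int64(i), 10)
		names = append(names, name)
		mounts = append(mounts, path.Join("/etc/tsconfig/", name))
	}
	return names, mounts
}

func main() {
	names, mounts := secretNamesFor("ts-prod-abcd", 2)
	fmt.Println(names)  // [ts-prod-abcd-0 ts-prod-abcd-1]
	fmt.Println(mounts) // [/etc/tsconfig/ts-prod-abcd-0 /etc/tsconfig/ts-prod-abcd-1]
}
```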
@@ -23,7 +23,6 @@ import (
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
@@ -265,6 +264,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
}
|
||||
|
||||
sts := &tailscaleSTSConfig{
|
||||
Replicas: 1,
|
||||
ParentResourceName: svc.Name,
|
||||
ParentResourceUID: string(svc.UID),
|
||||
Hostname: nameForService(svc),
|
||||
@@ -332,39 +332,44 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
return nil
|
||||
}
|
||||
|
||||
dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
|
||||
devices, err := a.ssr.DeviceInfo(ctx, crl, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get device ID: %w", err)
|
||||
}
|
||||
if dev == nil || dev.hostname == "" {
|
||||
msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth"
|
||||
logger.Debug(msg)
|
||||
// No hostname yet. Wait for the proxy pod to auth.
|
||||
svc.Status.LoadBalancer.Ingress = nil
|
||||
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyPending, msg, a.clock, logger)
|
||||
return nil
|
||||
|
||||
for _, dev := range devices {
|
||||
if dev == nil || dev.hostname == "" {
|
||||
msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth"
|
||||
logger.Debug(msg)
|
||||
// No hostname yet. Wait for the proxy pod to auth.
|
||||
svc.Status.LoadBalancer.Ingress = nil
|
||||
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyPending, msg, a.clock, logger)
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", "))
|
||||
svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{
|
||||
Hostname: dev.hostname,
|
||||
})
|
||||
|
||||
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("failed to parse cluster IP: %v", err)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger)
|
||||
return errors.New(msg)
|
||||
}
|
||||
|
||||
for _, ip := range dev.ips {
|
||||
addr, err := netip.ParseAddr(ip)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family
|
||||
svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{IP: ip})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", "))
|
||||
ingress := []corev1.LoadBalancerIngress{
|
||||
{Hostname: dev.hostname},
|
||||
}
|
||||
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("failed to parse cluster IP: %v", err)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger)
|
||||
return errors.New(msg)
|
||||
}
|
||||
for _, ip := range dev.ips {
|
||||
addr, err := netip.ParseAddr(ip)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family
|
||||
ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip})
|
||||
}
|
||||
}
|
||||
svc.Status.LoadBalancer.Ingress = ingress
|
||||
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger)
|
||||
return nil
|
||||
}
|
||||
|
@@ -69,9 +69,9 @@ type configOpts struct {
|
||||
shouldRemoveAuthKey bool
|
||||
secretExtraData map[string][]byte
|
||||
resourceVersion string
|
||||
|
||||
enableMetrics bool
|
||||
serviceMonitorLabels tsapi.Labels
|
||||
replicas *int32
|
||||
enableMetrics bool
|
||||
serviceMonitorLabels tsapi.Labels
|
||||
}
|
||||
|
||||
func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet {
|
||||
@@ -88,8 +88,8 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
|
||||
{Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"},
|
||||
},
|
||||
SecurityContext: &corev1.SecurityContext{
|
||||
Privileged: ptr.To(true),
|
||||
@@ -106,7 +106,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
var volumes []corev1.Volume
|
||||
volumes = []corev1.Volume{
|
||||
{
|
||||
Name: "tailscaledconfig",
|
||||
Name: "tailscaledconfig-0",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: opts.secretName,
|
||||
@@ -115,9 +115,9 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
},
|
||||
}
|
||||
tsContainer.VolumeMounts = []corev1.VolumeMount{{
|
||||
Name: "tailscaledconfig",
|
||||
Name: "tailscaledconfig-0",
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tsconfig",
|
||||
MountPath: "/etc/tsconfig/" + opts.secretName,
|
||||
}}
|
||||
if opts.firewallMode != "" {
|
||||
tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{
|
||||
@@ -154,10 +154,21 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
if opts.serveConfig != nil {
|
||||
tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{
|
||||
Name: "TS_SERVE_CONFIG",
|
||||
Value: "/etc/tailscaled/serve-config",
|
||||
Value: "/etc/tailscaled/$(POD_NAME)-serve-config",
|
||||
})
|
||||
volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}})
|
||||
tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"})
|
||||
volumes = append(volumes, corev1.Volume{
|
||||
Name: "serve-config-" + opts.secretName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: "serve-config-" + opts.secretName,
|
||||
Items: []corev1.KeyToPath{{
|
||||
Key: "serve-config",
|
||||
Path: "serve-config",
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config-" + opts.secretName, ReadOnly: true, MountPath: "/etc/tailscaled"})
|
||||
}
|
||||
tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{
|
||||
Name: "TS_INTERNAL_APP",
|
||||
@@ -202,7 +213,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
|
||||
},
|
||||
},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
Replicas: ptr.To[int32](1),
|
||||
Replicas: opts.replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"app": "1234-UID"},
|
||||
},
|
||||
@@ -266,15 +277,15 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
|
||||
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
|
||||
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
|
||||
{Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"},
|
||||
{Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"},
|
||||
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"},
|
||||
{Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)-serve-config"},
|
||||
{Name: "TS_INTERNAL_APP", Value: opts.app},
|
||||
},
|
||||
ImagePullPolicy: "Always",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{Name: "tailscaledconfig", ReadOnly: true, MountPath: "/etc/tsconfig"},
|
||||
{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"},
|
||||
{Name: "tailscaledconfig-0", ReadOnly: true, MountPath: "/etc/tsconfig/" + opts.secretName},
|
||||
{Name: "serve-config-" + opts.secretName, ReadOnly: true, MountPath: "/etc/tailscaled"},
|
||||
},
|
||||
}
|
||||
if opts.enableMetrics {
|
||||
@@ -302,16 +313,22 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
|
||||
}
|
||||
volumes := []corev1.Volume{
|
||||
{
|
||||
Name: "tailscaledconfig",
|
||||
Name: "tailscaledconfig-0",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: opts.secretName,
|
||||
},
|
||||
},
|
||||
},
|
||||
{Name: "serve-config",
|
||||
{
|
||||
Name: "serve-config-" + opts.secretName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}},
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: "serve-config-" + opts.secretName,
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
ss := &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
|
@@ -120,6 +120,7 @@ _Appears in:_
| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should<br />expose to tailnet as a Tailscale subnet router.<br />https://tailscale.com/kb/1019/subnets/<br />If this field is unset, the device does not get configured as a Tailscale subnet router.<br />This field is mutually exclusive with the appConnector field. | | |
| `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is<br />configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the<br />Connector does not act as an app connector.<br />Note that you will need to manually configure the permissions and the domains for the app connector via the<br />Admin panel.<br />Note also that the main tested and supported use case of this config option is to deploy an app connector on<br />Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose<br />cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have<br />tested or optimised for.<br />If you are using the app connector to access SaaS applications because you need a predictable egress IP that<br />can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows<br />via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT<br />device with a static IP address.<br />https://tailscale.com/kb/1281/app-connectors | | |
| `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.<br />This field is mutually exclusive with the appConnector field.<br />https://tailscale.com/kb/1103/exit-nodes | | |
| `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.<br />Defaults to 1. | | Minimum: 0 <br /> |


#### ConnectorStatus

@@ -113,6 +113,12 @@ type ConnectorSpec struct {
// https://tailscale.com/kb/1103/exit-nodes
// +optional
ExitNode bool `json:"exitNode"`

// Replicas specifies how many replicas to create the StatefulSet with.
// Defaults to 1.
// +optional
// +kubebuilder:validation:Minimum=0
Replicas *int32 `json:"replicas,omitempty"`
}

// SubnetRouter defines subnet routes that should be exposed to tailnet via a

@@ -110,6 +110,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) {
*out = new(AppConnector)
(*in).DeepCopyInto(*out)
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec.