cmd/{k8s-operator,k8s-proxy}: support new ProxyGroup type kube-apiserver
Adds a new enum value to ProxyGroup's .spec.Type field, kube-apiserver.
Deploys the new k8s-proxy container image and configures it via a new
config file specific to k8s-proxy. The config file is modelled after
conffile, but makes some minor changes to versioning to make sure we can
maintain backwards-compatible config within a single file, so that it's
easy to implement reading that config file directly from a Kubernetes
Secret in future.

Required significant updates to the operator's permissions so that it is
allowed to assign the powerful impersonation cluster role that k8s-proxy
requires to operate in authenticating mode.

The proxies deployed for the new ProxyGroup type currently work using
their own DNS name, but do not advertise a shared Tailscale Service, so
are not yet HA. Tailscale Service creation is planned to be added in a
separate reconciler loop.

Updates #13358

Change-Id: If75514bc068e2288ad7ac12db15f13dbade5793b
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
Parent: 9b88169de7
Commit: bee8cb1041
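For reference, each replica of a kube-apiserver ProxyGroup gets a config Secret holding a single versioned JSON document (see ensureConfigSecretsCreated below). A minimal sketch of producing that document, using only the keys the diff writes (Version, Hostname, App, and optionally AuthKey); the standalone-program form, the hostname, the auth key placeholder, and the assumed value of kubetypes.AppProxyAPIServerProxy are illustrative, not part of the commit:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    cfg := map[string]any{
        "Version":  "v1alpha1",         // config schema version, per the diff
        "Hostname": "kube-apiserver-0", // <ProxyGroup name>-<replica index>
        "App":      "k8s-proxy",        // assumed value of kubetypes.AppProxyAPIServerProxy
    }
    // AuthKey is only included until the device has authenticated.
    cfg["AuthKey"] = "tskey-auth-example" // hypothetical placeholder
    b, _ := json.MarshalIndent(cfg, "", "  ")
    fmt.Println(string(b)) // this JSON is what lands in the Secret's config.json key
}

Keeping a Version field inside the one file is what lets k8s-proxy reject or migrate unknown future schemas while still reading the same Secret key.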
@@ -41,6 +41,9 @@ rules:
   resources: ["customresourcedefinitions"]
   verbs: ["get", "list", "watch"]
   resourceNames: ["servicemonitors.monitoring.coreos.com"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+  resources: ["clusterroles", "clusterrolebindings"]
+  verbs: ["get", "create", "patch", "update", "list", "watch", "deletecollection"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -112,6 +112,7 @@ spec:
                 enum:
                   - egress
                   - ingress
+                  - kube-apiserver
                 x-kubernetes-validations:
                   - rule: self == oldSelf
                     message: ProxyGroup type is immutable
@@ -2893,6 +2893,7 @@ spec:
                 enum:
                   - egress
                   - ingress
+                  - kube-apiserver
                 type: string
                 x-kubernetes-validations:
                   - message: ProxyGroup type is immutable
@@ -4880,6 +4881,19 @@ rules:
       - get
       - list
       - watch
+  - apiGroups:
+      - rbac.authorization.k8s.io
+    resources:
+      - clusterroles
+      - clusterrolebindings
+    verbs:
+      - get
+      - create
+      - patch
+      - update
+      - list
+      - watch
+      - deletecollection
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -72,6 +72,7 @@ func main() {
        tsNamespace       = defaultEnv("OPERATOR_NAMESPACE", "")
        tslogging         = defaultEnv("OPERATOR_LOGGING", "info")
        image             = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
+       k8sProxyImage     = defaultEnv("K8S_PROXY_IMAGE", "tailscale/k8s-proxy:latest")
        priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
        tags              = defaultEnv("PROXY_TAGS", "tag:k8s")
        tsFirewallMode    = defaultEnv("PROXY_FIREWALL_MODE", "")
@@ -131,6 +132,7 @@ func main() {
        tailscaleNamespace:            tsNamespace,
        restConfig:                    restConfig,
        proxyImage:                    image,
+       k8sProxyImage:                 k8sProxyImage,
        proxyPriorityClassName:        priorityClassName,
        proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer,
        proxyTags:                     tags,
@@ -392,7 +394,6 @@ func runReconcilers(opts reconcilerOpts) {
        Complete(&HAServiceReconciler{
            recorder:    eventRecorder,
            tsClient:    opts.tsClient,
-           tsnetServer: opts.tsServer,
            defaultTags: strings.Split(opts.proxyTags, ","),
            Client:      mgr.GetClient(),
            logger:      opts.log.Named("service-pg-reconciler"),
@@ -616,6 +617,7 @@ func runReconcilers(opts reconcilerOpts) {

        tsNamespace:       opts.tailscaleNamespace,
        proxyImage:        opts.proxyImage,
+       k8sProxyImage:     opts.k8sProxyImage,
        defaultTags:       strings.Split(opts.proxyTags, ","),
        tsFirewallMode:    opts.proxyFirewallMode,
        defaultProxyClass: opts.defaultProxyClass,
@@ -637,6 +639,7 @@ type reconcilerOpts struct {
    tailscaleNamespace string       // namespace in which operator resources will be deployed
    restConfig         *rest.Config // config for connecting to the kube API server
    proxyImage         string       // <proxy-image-repo>:<proxy-image-tag>
+   k8sProxyImage      string       // <k8s-proxy-image-repo>:<k8s-proxy-image-tag>
    // proxyPriorityClassName isPriorityClass to be set for proxy Pods. This
    // is a legacy mechanism for cluster resource configuration options -
    // going forward use ProxyClass.
cmd/k8s-operator/proxy-pg.go (new file, 787 lines)
@@ -0,0 +1,787 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package main
+
+import (
+   "context"
+   "encoding/json"
+   "errors"
+   "fmt"
+   "net/http"
+   "net/netip"
+   "reflect"
+   "slices"
+   "strings"
+   "sync"
+
+   "go.uber.org/zap"
+   corev1 "k8s.io/api/core/v1"
+   discoveryv1 "k8s.io/api/discovery/v1"
+   apiequality "k8s.io/apimachinery/pkg/api/equality"
+   apierrors "k8s.io/apimachinery/pkg/api/errors"
+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/types"
+   "k8s.io/client-go/tools/record"
+   "sigs.k8s.io/controller-runtime/pkg/client"
+   "sigs.k8s.io/controller-runtime/pkg/reconcile"
+   "tailscale.com/internal/client/tailscale"
+   "tailscale.com/ipn"
+   tsoperator "tailscale.com/k8s-operator"
+   tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+   "tailscale.com/kube/ingressservices"
+   "tailscale.com/kube/kubetypes"
+   "tailscale.com/tailcfg"
+   "tailscale.com/tstime"
+   "tailscale.com/util/clientmetric"
+   "tailscale.com/util/mak"
+   "tailscale.com/util/set"
+)
+
+const (
+   proxyPGFinalizerName = "tailscale.com/proxy-pg-finalizer"
+
+   reasonAPIServerProxyPGInvalid              = "APIServerProxyPGInvalid"
+   reasonAPIServerProxyPGValid                = "APIServerProxyPGValid"
+   reasonAPIServerProxyPGConfigured           = "APIServerProxyPGConfigured"
+   reasonAPIServerProxyPGNoBackendsConfigured = "APIServerProxyPGNoBackendsConfigured"
+   reasonAPIServerProxyPGCreationFailed       = "APIServerProxyPGCreationFailed"
+)
+
+var gaugeAPIServerProxyPGResources = clientmetric.NewGauge(kubetypes.MetricAPIServerProxyPGResourceCount)
+
+// APIServerProxyHAReconciler reconciles the Tailscale Services required for an
+// HA deployment of the API Server Proxy.
+type APIServerProxyHAReconciler struct {
+   client.Client
+   isDefaultLoadBalancer bool
+   recorder              record.EventRecorder
+   logger                *zap.SugaredLogger
+   tsClient              tsClient
+   tsNamespace           string
+   lc                    localClient
+   defaultTags           []string
+   operatorID            string // stableID of the operator's Tailscale device
+
+   clock tstime.Clock
+
+   mu         sync.Mutex // protects following
+   managedPGs set.Slice[types.UID]
+}
+
+// Reconcile is the entry point for the controller.
+func (r *APIServerProxyHAReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
+   logger := r.logger.With("ProxyGroup", req.NamespacedName)
+   logger.Debugf("starting reconcile")
+   defer logger.Debugf("reconcile finished")
+
+   svc := new(corev1.Service)
+   err = r.Get(ctx, req.NamespacedName, svc)
+   if apierrors.IsNotFound(err) {
+       // Request object not found, could have been deleted after reconcile request.
+       logger.Debugf("Service not found, assuming it was deleted")
+       return res, nil
+   } else if err != nil {
+       return res, fmt.Errorf("failed to get Service: %w", err)
+   }
+
+   hostname := nameForService(svc)
+   logger = logger.With("hostname", hostname)
+
+   if !svc.DeletionTimestamp.IsZero() || !r.isTailscaleService(svc) {
+       logger.Debugf("Service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up")
+       _, err = r.maybeCleanup(ctx, hostname, svc, logger)
+       return res, err
+   }
+
+   // needsRequeue is set to true if the underlying Tailscale Service has changed as a result of this reconcile. If that
+   // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the Tailscale Service in a
+   // multi-cluster Ingress setup have not resulted in another actor overwriting our Tailscale Service update.
+   needsRequeue := false
+   needsRequeue, err = r.maybeProvision(ctx, hostname, svc, logger)
+   if err != nil {
+       if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+           logger.Infof("optimistic lock error, retrying: %s", err)
+       } else {
+           return reconcile.Result{}, err
+       }
+   }
+   if needsRequeue {
+       res = reconcile.Result{RequeueAfter: requeueInterval()}
+   }
+
+   return reconcile.Result{}, nil
+}
+
+// maybeProvision ensures that a Tailscale Service for this ProxyGroup exists
+// and is up to date.
+//
+// Returns true if the operation resulted in a Tailscale Service update.
+func (r *APIServerProxyHAReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcsChanged bool, err error) {
+   oldSvcStatus := svc.Status.DeepCopy()
+   defer func() {
+       if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) {
+           // An error encountered here should get returned by the Reconcile function.
+           err = errors.Join(err, r.Client.Status().Update(ctx, svc))
+       }
+   }()
+
+   pgName := svc.Annotations[AnnotationProxyGroup]
+   if pgName == "" {
+       logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning")
+       return false, nil
+   }
+
+   logger = logger.With("ProxyGroup", pgName)
+
+   pg := &tsapi.ProxyGroup{}
+   if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil {
+       if apierrors.IsNotFound(err) {
+           msg := fmt.Sprintf("ProxyGroup %q does not exist", pgName)
+           logger.Warnf(msg)
+           r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", msg)
+           return false, nil
+       }
+       return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err)
+   }
+   if !tsoperator.ProxyGroupIsReady(pg) {
+       logger.Infof("ProxyGroup is not (yet) ready")
+       return false, nil
+   }
+
+   // Validate Service configuration
+   if violations := validateService(svc); len(violations) > 0 {
+       msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", "))
+       r.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg)
+       r.logger.Error(msg)
+       tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger)
+       return false, nil
+   }
+
+   if !slices.Contains(svc.Finalizers, finalizerName) {
+       // This log line is printed exactly once during initial provisioning,
+       // because once the finalizer is in place this block gets skipped. So,
+       // this is a nice place to tell the operator that the high level,
+       // multi-reconcile operation is underway.
+       logger.Infof("exposing Service over tailscale")
+       svc.Finalizers = append(svc.Finalizers, finalizerName)
+       if err := r.Update(ctx, svc); err != nil {
+           return false, fmt.Errorf("failed to add finalizer: %w", err)
+       }
+       r.mu.Lock()
+       r.managedPGs.Add(svc.UID)
+       gaugePGServiceResources.Set(int64(r.managedPGs.Len()))
+       r.mu.Unlock()
+   }
+
+   // 1. Ensure that if Service's hostname/name has changed, any Tailscale Service
+   // resources corresponding to the old hostname are cleaned up.
+   // In practice, this function will ensure that any Tailscale Services that are
+   // associated with the provided ProxyGroup and no longer owned by a
+   // Service are cleaned up. This is fine- it is not expensive and ensures
+   // that in edge cases (a single update changed both hostname and removed
+   // ProxyGroup annotation) the Tailscale Service is more likely to be
+   // (eventually) removed.
+   svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger)
+   if err != nil {
+       return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err)
+   }
+
+   // 2. Ensure that there isn't a Tailscale Service with the same hostname
+   // already created and not owned by this Service.
+   serviceName := tailcfg.ServiceName("svc:" + hostname)
+   existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName)
+   if isErrorFeatureFlagNotEnabled(err) {
+       logger.Warn(msgFeatureFlagNotEnabled)
+       r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled)
+       return false, nil
+   }
+   if err != nil && !isErrorTailscaleServiceNotFound(err) {
+       return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err)
+   }
+
+   // 3. Generate the Tailscale Service owner annotation for new or existing Tailscale Service.
+   // This checks and ensures that Tailscale Service's owner references are updated
+   // for this Service and errors if that is not possible (i.e. because it
+   // appears that the Tailscale Service has been created by a non-operator actor).
+   updatedAnnotations, err := r.ownerAnnotations(existingTSSvc)
+   if err != nil {
+       instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotation", AnnotationHostname)
+       msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr)
+       logger.Warn(msg)
+       r.recorder.Event(svc, corev1.EventTypeWarning, "InvalidTailscaleService", msg)
+       tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, msg, r.clock, logger)
+       return false, nil
+   }
+
+   tags := r.defaultTags
+   if tstr, ok := svc.Annotations[AnnotationTags]; ok && tstr != "" {
+       tags = strings.Split(tstr, ",")
+   }
+
+   tsSvc := &tailscale.VIPService{
+       Name:        serviceName,
+       Tags:        tags,
+       Ports:       []string{"do-not-validate"}, // we don't want to validate ports
+       Comment:     managedTSServiceComment,
+       Annotations: updatedAnnotations,
+   }
+   if existingTSSvc != nil {
+       tsSvc.Addrs = existingTSSvc.Addrs
+   }
+
+   // TODO(irbekrm): right now if two Service resources attempt to apply different Tailscale Service configs (different
+   // tags) we can end up reconciling those in a loop. We should detect when a Service
+   // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates.
+   if existingTSSvc == nil ||
+       !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) ||
+       !ownersAreSetAndEqual(tsSvc, existingTSSvc) {
+       logger.Infof("Ensuring Tailscale Service exists and is up to date")
+       if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil {
+           return false, fmt.Errorf("error creating Tailscale Service: %w", err)
+       }
+       existingTSSvc = tsSvc
+   }
+
+   cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace)
+   if err != nil {
+       return false, fmt.Errorf("error retrieving ingress services configuration: %w", err)
+   }
+   if cm == nil {
+       logger.Info("ConfigMap not yet created, waiting..")
+       return false, nil
+   }
+
+   if existingTSSvc.Addrs == nil {
+       existingTSSvc, err = r.tsClient.GetVIPService(ctx, tsSvc.Name)
+       if err != nil {
+           return false, fmt.Errorf("error getting Tailscale Service: %w", err)
+       }
+       if existingTSSvc.Addrs == nil {
+           // TODO(irbekrm): this should be a retry
+           return false, fmt.Errorf("unexpected: Tailscale Service addresses not populated")
+       }
+   }
+
+   var tsSvcIPv4 netip.Addr
+   var tsSvcIPv6 netip.Addr
+   for _, tsip := range existingTSSvc.Addrs {
+       ip, err := netip.ParseAddr(tsip)
+       if err != nil {
+           return false, fmt.Errorf("error parsing Tailscale Service address: %w", err)
+       }
+
+       if ip.Is4() {
+           tsSvcIPv4 = ip
+       } else if ip.Is6() {
+           tsSvcIPv6 = ip
+       }
+   }
+
+   cfg := ingressservices.Config{}
+   for _, cip := range svc.Spec.ClusterIPs {
+       ip, err := netip.ParseAddr(cip)
+       if err != nil {
+           return false, fmt.Errorf("error parsing Kubernetes Service address: %w", err)
+       }
+
+       if ip.Is4() {
+           cfg.IPv4Mapping = &ingressservices.Mapping{
+               ClusterIP:          ip,
+               TailscaleServiceIP: tsSvcIPv4,
+           }
+       } else if ip.Is6() {
+           cfg.IPv6Mapping = &ingressservices.Mapping{
+               ClusterIP:          ip,
+               TailscaleServiceIP: tsSvcIPv6,
+           }
+       }
+   }
+
+   existingCfg := cfgs[serviceName.String()]
+   if !reflect.DeepEqual(existingCfg, cfg) {
+       mak.Set(&cfgs, serviceName.String(), cfg)
+       cfgBytes, err := json.Marshal(cfgs)
+       if err != nil {
+           return false, fmt.Errorf("error marshaling ingress config: %w", err)
+       }
+       mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, cfgBytes)
+       if err := r.Update(ctx, cm); err != nil {
+           return false, fmt.Errorf("error updating ingress config: %w", err)
+       }
+   }
+
+   logger.Infof("updating AdvertiseServices config")
+   // 4. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service
+   // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved.
+   if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pg.Name, serviceName, &cfg, true, logger); err != nil {
+       return false, fmt.Errorf("failed to update tailscaled config: %w", err)
+   }
+
+   count, err := r.numberPodsAdvertising(ctx, pgName, serviceName)
+   if err != nil {
+       return false, fmt.Errorf("failed to get number of advertised Pods: %w", err)
+   }
+
+   // TODO(irbekrm): here and when creating the Tailscale Service, verify if the
+   // error is not terminal (and therefore should not be reconciled). For
+   // example, if the hostname is already a hostname of a Tailscale node,
+   // the GET here will fail.
+   // If there are no Pods advertising the Tailscale Service (yet), we want to set 'svc.Status.LoadBalancer.Ingress' to nil.
+   var lbs []corev1.LoadBalancerIngress
+   conditionStatus := metav1.ConditionFalse
+   conditionType := tsapi.IngressSvcConfigured
+   conditionReason := reasonIngressSvcNoBackendsConfigured
+   conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", count, pgReplicas(pg))
+   if count != 0 {
+       dnsName, err := r.dnsNameForService(ctx, serviceName)
+       if err != nil {
+           return false, fmt.Errorf("error getting DNS name for Service: %w", err)
+       }
+
+       lbs = []corev1.LoadBalancerIngress{
+           {
+               Hostname: dnsName,
+               IP:       tsSvcIPv4.String(),
+           },
+       }
+
+       conditionStatus = metav1.ConditionTrue
+       conditionReason = reasonIngressSvcConfigured
+   }
+
+   tsoperator.SetServiceCondition(svc, conditionType, conditionStatus, conditionReason, conditionMessage, r.clock, logger)
+   svc.Status.LoadBalancer.Ingress = lbs
+
+   return svcsChanged, nil
+}
+
+// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the
+// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only
+// deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference
+// corresponding to this Service.
+func (r *APIServerProxyHAReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) {
+   logger.Debugf("Ensuring any resources for Service are cleaned up")
+   ix := slices.Index(svc.Finalizers, finalizerName)
+   if ix < 0 {
+       logger.Debugf("no finalizer, nothing to do")
+       return false, nil
+   }
+   logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname)
+
+   defer func() {
+       if err != nil {
+           return
+       }
+       err = r.deleteFinalizer(ctx, svc, logger)
+   }()
+
+   serviceName := tailcfg.ServiceName("svc:" + hostname)
+   // 1. Clean up the Tailscale Service.
+   svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger)
+   if err != nil {
+       return false, fmt.Errorf("error deleting Tailscale Service: %w", err)
+   }
+
+   // 2. Unadvertise the Tailscale Service.
+   pgName := svc.Annotations[AnnotationProxyGroup]
+   if err = r.maybeUpdateAdvertiseServicesConfig(ctx, svc, pgName, serviceName, nil, false, logger); err != nil {
+       return false, fmt.Errorf("failed to update tailscaled config services: %w", err)
+   }
+
+   // TODO: maybe wait for the service to be unadvertised, only then remove the backend routing
+
+   // 3. Clean up ingress config (routing rules).
+   cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace)
+   if err != nil {
+       return false, fmt.Errorf("error retrieving ingress services configuration: %w", err)
+   }
+   if cm == nil || cfgs == nil {
+       return true, nil
+   }
+   logger.Infof("Removing Tailscale Service %q from ingress config for ProxyGroup %q", hostname, pgName)
+   delete(cfgs, serviceName.String())
+   cfgBytes, err := json.Marshal(cfgs)
+   if err != nil {
+       return false, fmt.Errorf("error marshaling ingress config: %w", err)
+   }
+   mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, cfgBytes)
+   return true, r.Update(ctx, cm)
+}
+
+// Tailscale Services that are associated with the provided ProxyGroup and no longer managed by this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up.
+// Returns true if the operation resulted in existing Tailscale Service updates (owner reference removal).
+func (r *APIServerProxyHAReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) {
+   cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace)
+   if err != nil {
+       return false, fmt.Errorf("failed to get ingress service config: %s", err)
+   }
+
+   svcList := &corev1.ServiceList{}
+   if err := r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil {
+       return false, fmt.Errorf("failed to find Services for ProxyGroup %q: %w", proxyGroupName, err)
+   }
+
+   ingressConfigChanged := false
+   for tsSvcName, cfg := range config {
+       found := false
+       for _, svc := range svcList.Items {
+           if strings.EqualFold(fmt.Sprintf("svc:%s", nameForService(&svc)), tsSvcName) {
+               found = true
+               break
+           }
+       }
+       if !found {
+           logger.Infof("Tailscale Service %q is not owned by any Service, cleaning up", tsSvcName)
+
+           // Make sure the Tailscale Service is not advertised in tailscaled or serve config.
+           if err = r.maybeUpdateAdvertiseServicesConfig(ctx, nil, proxyGroupName, tailcfg.ServiceName(tsSvcName), &cfg, false, logger); err != nil {
+               return false, fmt.Errorf("failed to update tailscaled config services: %w", err)
+           }
+
+           svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger)
+           if err != nil {
+               return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err)
+           }
+
+           _, ok := config[tsSvcName]
+           if ok {
+               logger.Infof("Removing Tailscale Service %q from serve config", tsSvcName)
+               delete(config, tsSvcName)
+               ingressConfigChanged = true
+           }
+       }
+   }
+
+   if ingressConfigChanged {
+       configBytes, err := json.Marshal(config)
+       if err != nil {
+           return false, fmt.Errorf("marshaling serve config: %w", err)
+       }
+       mak.Set(&cm.BinaryData, ingressservices.IngressConfigKey, configBytes)
+       if err := r.Update(ctx, cm); err != nil {
+           return false, fmt.Errorf("updating serve config: %w", err)
+       }
+   }
+
+   return svcsChanged, nil
+}
+
+func (r *APIServerProxyHAReconciler) deleteFinalizer(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error {
+   svc.Finalizers = slices.DeleteFunc(svc.Finalizers, func(f string) bool {
+       return f == finalizerName
+   })
+   logger.Debugf("ensure %q finalizer is removed", finalizerName)
+
+   if err := r.Update(ctx, svc); err != nil {
+       return fmt.Errorf("failed to remove finalizer %q: %w", finalizerName, err)
+   }
+   r.mu.Lock()
+   defer r.mu.Unlock()
+   r.managedPGs.Remove(svc.UID)
+   gaugePGServiceResources.Set(int64(r.managedPGs.Len()))
+   return nil
+}
+
+func (r *APIServerProxyHAReconciler) isTailscaleService(svc *corev1.Service) bool {
+   proxyGroup := svc.Annotations[AnnotationProxyGroup]
+   return r.shouldExpose(svc) && proxyGroup != ""
+}
+
+func (r *APIServerProxyHAReconciler) shouldExpose(svc *corev1.Service) bool {
+   return r.shouldExposeClusterIP(svc)
+}
+
+func (r *APIServerProxyHAReconciler) shouldExposeClusterIP(svc *corev1.Service) bool {
+   if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
+       return false
+   }
+   return isTailscaleLoadBalancerService(svc, r.isDefaultLoadBalancer) || hasExposeAnnotation(svc)
+}
+
+// tailnetCertDomain returns the base domain (TCD) of the current tailnet.
+func (r *APIServerProxyHAReconciler) tailnetCertDomain(ctx context.Context) (string, error) {
+   st, err := r.lc.StatusWithoutPeers(ctx)
+   if err != nil {
+       return "", fmt.Errorf("error getting tailscale status: %w", err)
+   }
+   return st.CurrentTailnet.MagicDNSSuffix, nil
+}
+
+// cleanupTailscaleService deletes any Tailscale Service by the provided name if it is not owned by operator instances other than this one.
+// If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference.
+// If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing.
+// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred.
+func (r *APIServerProxyHAReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) {
+   svc, err := r.tsClient.GetVIPService(ctx, name)
+   if isErrorFeatureFlagNotEnabled(err) {
+       msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled)
+       logger.Warn(msg)
+       return false, nil
+   }
+   if err != nil {
+       errResp := &tailscale.ErrResponse{}
+       ok := errors.As(err, errResp)
+       if ok && errResp.Status == http.StatusNotFound {
+           return false, nil
+       }
+       if !ok {
+           return false, fmt.Errorf("unexpected error getting Tailscale Service %q: %w", name.String(), err)
+       }
+
+       return false, fmt.Errorf("error getting Tailscale Service: %w", err)
+   }
+   if svc == nil {
+       return false, nil
+   }
+   o, err := parseOwnerAnnotation(svc)
+   if err != nil {
+       return false, fmt.Errorf("error parsing Tailscale Service owner annotation: %w", err)
+   }
+   if o == nil || len(o.OwnerRefs) == 0 {
+       return false, nil
+   }
+   // Comparing with the operatorID only means that we will not be able to
+   // clean up Tailscale Services in cases where the operator was deleted from the
+   // cluster before deleting the Ingress. Perhaps the comparison could be
+   // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'.
+   ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool {
+       return or.OperatorID == r.operatorID
+   })
+   if ix == -1 {
+       return false, nil
+   }
+   if len(o.OwnerRefs) == 1 {
+       logger.Infof("Deleting Tailscale Service %q", name)
+       return false, r.tsClient.DeleteVIPService(ctx, name)
+   }
+   o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1)
+   logger.Infof("Updating Tailscale Service %q", name)
+   json, err := json.Marshal(o)
+   if err != nil {
+       return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err)
+   }
+   svc.Annotations[ownerAnnotation] = string(json)
+   return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc)
+}
+
+func (r *APIServerProxyHAReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) {
+   logger.Debugf("checking backend routes for service '%s'", serviceName)
+   pod := &corev1.Pod{}
+   err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, pod)
+   if apierrors.IsNotFound(err) {
+       logger.Debugf("Pod %q not found", replicaName)
+       return false, nil
+   }
+   if err != nil {
+       return false, fmt.Errorf("failed to get Pod: %w", err)
+   }
+   secret := &corev1.Secret{}
+   err = r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, secret)
+   if apierrors.IsNotFound(err) {
+       logger.Debugf("Secret %q not found", replicaName)
+       return false, nil
+   }
+   if err != nil {
+       return false, fmt.Errorf("failed to get Secret: %w", err)
+   }
+   if len(secret.Data) == 0 || secret.Data[ingressservices.IngressConfigKey] == nil {
+       return false, nil
+   }
+   gotCfgB := secret.Data[ingressservices.IngressConfigKey]
+   var gotCfgs ingressservices.Status
+   if err := json.Unmarshal(gotCfgB, &gotCfgs); err != nil {
+       return false, fmt.Errorf("error unmarshalling ingress config: %w", err)
+   }
+   statusUpToDate, err := isCurrentStatus(gotCfgs, pod, logger)
+   if err != nil {
+       return false, fmt.Errorf("error checking ingress config status: %w", err)
+   }
+   if !statusUpToDate || !reflect.DeepEqual(gotCfgs.Configs.GetConfig(serviceName), wantsCfg) {
+       logger.Debugf("Pod %q is not ready to advertise Tailscale Service", pod.Name)
+       return false, nil
+   }
+   return true, nil
+}
+
+func (r *APIServerProxyHAReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) {
+   logger.Debugf("checking advertisement for service '%s'", serviceName)
+   // Get all config Secrets for this ProxyGroup.
+   // Get all Pods
+   secrets := &corev1.SecretList{}
+   if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil {
+       return fmt.Errorf("failed to list config Secrets: %w", err)
+   }
+
+   if svc != nil && shouldBeAdvertised {
+       shouldBeAdvertised, err = r.checkEndpointsReady(ctx, svc, logger)
+       if err != nil {
+           return fmt.Errorf("failed to check readiness of Service '%s' endpoints: %w", svc.Name, err)
+       }
+   }
+
+   for _, secret := range secrets.Items {
+       var updated bool
+       for fileName, confB := range secret.Data {
+           var conf ipn.ConfigVAlpha
+           if err := json.Unmarshal(confB, &conf); err != nil {
+               return fmt.Errorf("error unmarshalling ProxyGroup config: %w", err)
+           }
+
+           idx := slices.Index(conf.AdvertiseServices, serviceName.String())
+           isAdvertised := idx >= 0
+           switch {
+           case !isAdvertised && !shouldBeAdvertised:
+               logger.Debugf("service %q shouldn't be advertised", serviceName)
+               continue
+           case isAdvertised && shouldBeAdvertised:
+               logger.Debugf("service %q is already advertised", serviceName)
+               continue
+           case isAdvertised && !shouldBeAdvertised:
+               logger.Debugf("deleting advertisement for service %q", serviceName)
+               conf.AdvertiseServices = slices.Delete(conf.AdvertiseServices, idx, idx+1)
+           case shouldBeAdvertised:
+               replicaName, ok := strings.CutSuffix(secret.Name, "-config")
+               if !ok {
+                   logger.Infof("[unexpected] unable to determine replica name from config Secret name %q, unable to determine if backend routing has been configured", secret.Name)
+                   return nil
+               }
+               ready, err := r.backendRoutesSetup(ctx, serviceName.String(), replicaName, pgName, cfg, logger)
+               if err != nil {
+                   return fmt.Errorf("error checking backend routes: %w", err)
+               }
+               if !ready {
+                   logger.Debugf("service %q is not ready to be advertised", serviceName)
+                   continue
+               }
+
+               conf.AdvertiseServices = append(conf.AdvertiseServices, serviceName.String())
+           }
+           confB, err := json.Marshal(conf)
+           if err != nil {
+               return fmt.Errorf("error marshalling ProxyGroup config: %w", err)
+           }
+           mak.Set(&secret.Data, fileName, confB)
+           updated = true
+       }
+       if updated {
+           if err := r.Update(ctx, &secret); err != nil {
+               return fmt.Errorf("error updating ProxyGroup config Secret: %w", err)
+           }
+       }
+   }
+   return nil
+}
+
+func (r *APIServerProxyHAReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) {
+   // Get all state Secrets for this ProxyGroup.
+   secrets := &corev1.SecretList{}
+   if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil {
+       return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err)
+   }
+
+   var count int
+   for _, secret := range secrets.Items {
+       prefs, ok, err := getDevicePrefs(&secret)
+       if err != nil {
+           return 0, fmt.Errorf("error getting node metadata: %w", err)
+       }
+       if !ok {
+           continue
+       }
+       if slices.Contains(prefs.AdvertiseServices, serviceName.String()) {
+           count++
+       }
+   }
+
+   return count, nil
+}
+
+// ownerAnnotations returns the updated annotations required to ensure this
+// instance of the operator is included as an owner. If the Tailscale Service is not
+// nil, but does not contain an owner we return an error as this likely means
+// that the Tailscale Service was created by something other than a Tailscale
+// Kubernetes operator.
+func (r *APIServerProxyHAReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) {
+   ref := OwnerRef{
+       OperatorID: r.operatorID,
+   }
+   if svc == nil {
+       c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
+       json, err := json.Marshal(c)
+       if err != nil {
+           return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err)
+       }
+       return map[string]string{
+           ownerAnnotation: string(json),
+       }, nil
+   }
+   o, err := parseOwnerAnnotation(svc)
+   if err != nil {
+       return nil, err
+   }
+   if o == nil || len(o.OwnerRefs) == 0 {
+       return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name)
+   }
+   if slices.Contains(o.OwnerRefs, ref) { // up to date
+       return svc.Annotations, nil
+   }
+   o.OwnerRefs = append(o.OwnerRefs, ref)
+   json, err := json.Marshal(o)
+   if err != nil {
+       return nil, fmt.Errorf("error marshalling updated owner references: %w", err)
+   }
+
+   newAnnots := make(map[string]string, len(svc.Annotations)+1)
+   for k, v := range svc.Annotations {
+       newAnnots[k] = v
+   }
+   newAnnots[ownerAnnotation] = string(json)
+   return newAnnots, nil
+}
+
+// dnsNameForService returns the DNS name for the given Tailscale Service name.
+func (r *APIServerProxyHAReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) {
+   s := svc.WithoutPrefix()
+   tcd, err := r.tailnetCertDomain(ctx)
+   if err != nil {
+       return "", fmt.Errorf("error determining DNS name base: %w", err)
+   }
+   return s + "." + tcd, nil
+}
+
+func (r *APIServerProxyHAReconciler) getEndpointSlicesForService(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) ([]discoveryv1.EndpointSlice, error) {
+   logger.Debugf("looking for endpoint slices for svc with name '%s' in namespace '%s' matching label '%s=%s'", svc.Name, svc.Namespace, discoveryv1.LabelServiceName, svc.Name)
+   // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
+   labels := map[string]string{discoveryv1.LabelServiceName: svc.Name}
+   eps := new(discoveryv1.EndpointSliceList)
+   if err := r.List(ctx, eps, client.InNamespace(svc.Namespace), client.MatchingLabels(labels)); err != nil {
+       return nil, fmt.Errorf("error listing EndpointSlices: %w", err)
+   }
+
+   if len(eps.Items) == 0 {
+       logger.Debugf("Service '%s' EndpointSlice does not yet exist. We will reconcile again once it's created", svc.Name)
+       return nil, nil
+   }
+
+   return eps.Items, nil
+}
+
+func (r *APIServerProxyHAReconciler) checkEndpointsReady(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) (bool, error) {
+   epss, err := r.getEndpointSlicesForService(ctx, svc, logger)
+   if err != nil {
+       return false, fmt.Errorf("failed to list EndpointSlices for Service %q: %w", svc.Name, err)
+   }
+   for _, eps := range epss {
+       for _, ep := range eps.Endpoints {
+           if *ep.Conditions.Ready {
+               return true, nil
+           }
+       }
+   }
+
+   logger.Debugf("could not find any ready Endpoints in EndpointSlice")
+   return false, nil
+}
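The owner-annotation handling above (ownerAnnotations, parseOwnerAnnotation, cleanupTailscaleService) round-trips a small JSON document stored on the Tailscale Service so that several operator instances can share one Service. The OwnerRef and ownerAnnotationValue type definitions and the annotation's JSON field names are not shown in this diff, so the sketch below declares stand-in types with assumed field names purely to illustrate the add/remove flow:

package main

import (
    "encoding/json"
    "fmt"
    "slices"
)

// Stand-ins for the operator's real types; the JSON field names here are
// assumptions, not taken from the commit.
type OwnerRef struct {
    OperatorID string `json:"operatorID"`
}

type ownerAnnotationValue struct {
    OwnerRefs []OwnerRef `json:"ownerRefs"`
}

func main() {
    // Two operator instances sharing one Tailscale Service in a
    // multi-cluster setup.
    v := ownerAnnotationValue{OwnerRefs: []OwnerRef{{OperatorID: "op-a"}, {OperatorID: "op-b"}}}

    // Cleanup by operator "op-a": remove only its own owner reference,
    // mirroring the slices.IndexFunc/slices.Delete logic above; the
    // Service itself is only deleted when the last reference goes away.
    ix := slices.IndexFunc(v.OwnerRefs, func(or OwnerRef) bool { return or.OperatorID == "op-a" })
    if ix >= 0 && len(v.OwnerRefs) > 1 {
        v.OwnerRefs = slices.Delete(v.OwnerRefs, ix, ix+1)
    }
    b, _ := json.Marshal(v)
    fmt.Println(string(b)) // {"ownerRefs":[{"operatorID":"op-b"}]}
}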
@@ -68,6 +68,7 @@ type ProxyGroupReconciler struct {
    // User-specified defaults from the helm installation.
    tsNamespace       string
    proxyImage        string
+   k8sProxyImage     string
    defaultTags       []string
    tsFirewallMode    string
    defaultProxyClass string
@@ -252,6 +253,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
            return fmt.Errorf("error provisioning state Secrets: %w", err)
        }
    }
+
    sa := pgServiceAccount(pg, r.tsNamespace)
    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) {
        s.ObjectMeta.Labels = sa.ObjectMeta.Labels
@@ -260,6 +262,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
    }); err != nil {
        return fmt.Errorf("error provisioning ServiceAccount: %w", err)
    }
+
    role := pgRole(pg, r.tsNamespace)
    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
        r.ObjectMeta.Labels = role.ObjectMeta.Labels
@@ -269,6 +272,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
    }); err != nil {
        return fmt.Errorf("error provisioning Role: %w", err)
    }
+
    roleBinding := pgRoleBinding(pg, r.tsNamespace)
    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) {
        r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
@@ -279,6 +283,28 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
    }); err != nil {
        return fmt.Errorf("error provisioning RoleBinding: %w", err)
    }
+
+   if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+       clusterRole := proxyClusterRole(pg)
+       if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, clusterRole, func(r *rbacv1.ClusterRole) {
+           r.ObjectMeta.Labels = role.ObjectMeta.Labels
+           r.ObjectMeta.Annotations = role.ObjectMeta.Annotations
+           r.Rules = role.Rules
+       }); err != nil {
+           return fmt.Errorf("error provisioning ClusterRole: %w", err)
+       }
+
+       clusterRoleBinding := proxyClusterRoleBinding(pg, r.tsNamespace)
+       if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, clusterRoleBinding, func(r *rbacv1.ClusterRoleBinding) {
+           r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
+           r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations
+           r.RoleRef = roleBinding.RoleRef
+           r.Subjects = roleBinding.Subjects
+       }); err != nil {
+           return fmt.Errorf("error provisioning ClusterRoleBinding: %w", err)
+       }
+   }
+
    if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
        cm, hp := pgEgressCM(pg, r.tsNamespace)
        if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
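The rules of proxyClusterRole are not part of this hunk, but the commit message describes it as the powerful impersonation cluster role k8s-proxy needs to run in authenticating mode. A hypothetical sketch of what such a role plausibly grants, using the rbacv1 types the surrounding code already imports; the name, verbs, and resources are assumptions, not taken from the commit:

package main

import (
    "fmt"

    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    cr := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{Name: "example-kube-apiserver-impersonation"}, // hypothetical name
        Rules: []rbacv1.PolicyRule{{
            APIGroups: []string{""},
            Resources: []string{"users", "groups"},
            // "impersonate" lets the proxy forward requests to the kube API
            // server as the tailnet caller's identity.
            Verbs: []string{"impersonate"},
        }},
    }
    fmt.Println(cr.Name)
}

Because granting impersonation is itself a privilege escalation, the operator's own RBAC (the clusterroles/clusterrolebindings rules added earlier in this diff) has to include every verb it needs to create and manage such roles.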
@@ -289,6 +315,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
            return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err)
        }
    }
+
    if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
        cm := pgIngressCM(pg, r.tsNamespace)
        if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
@@ -298,7 +325,13 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
            return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err)
        }
    }
-   ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass)
+
+   defaultImage := r.proxyImage
+   if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+       defaultImage = r.k8sProxyImage
+   }
+
+   ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, proxyClass)
    if err != nil {
        return fmt.Errorf("error generating StatefulSet spec: %w", err)
    }
@@ -483,43 +516,80 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
        }
    }

-   configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret)
-   if err != nil {
-       return "", fmt.Errorf("error creating tailscaled config: %w", err)
-   }
-
-   for cap, cfg := range configs {
-       cfgJSON, err := json.Marshal(cfg)
-       if err != nil {
-           return "", fmt.Errorf("error marshalling tailscaled config: %w", err)
-       }
-       mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON)
-   }
-
-   // The config sha256 sum is a value for a hash annotation used to trigger
-   // pod restarts when tailscaled config changes. Any config changes apply
-   // to all replicas, so it is sufficient to only hash the config for the
-   // first replica.
-   //
-   // In future, we're aiming to eliminate restarts altogether and have
-   // pods dynamically reload their config when it changes.
-   if i == 0 {
-       sum := sha256.New()
-       for _, cfg := range configs {
-           // Zero out the auth key so it doesn't affect the sha256 hash when we
-           // remove it from the config after the pods have all authed. Otherwise
-           // all the pods will need to restart immediately after authing.
-           cfg.AuthKey = nil
-           b, err := json.Marshal(cfg)
-           if err != nil {
-               return "", err
-           }
-           if _, err := sum.Write(b); err != nil {
-               return "", err
-           }
-       }
-
-       configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil))
-   }
+   if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+       hostname := fmt.Sprintf("%s-%d", pg.Name, i)
+
+       if authKey == "" && existingCfgSecret != nil {
+           deviceAuthed := false
+           for _, d := range pg.Status.Devices {
+               if d.Hostname == hostname {
+                   deviceAuthed = true
+                   break
+               }
+           }
+           if !deviceAuthed {
+               existingCfg := map[string]any{}
+               if err := json.Unmarshal(existingCfgSecret.Data["config.json"], &existingCfg); err != nil {
+                   return "", fmt.Errorf("error unmarshalling existing config: %w", err)
+               }
+               if existingAuthKey, ok := existingCfg["AuthKey"]; ok {
+                   authKey, _ = existingAuthKey.(string)
+               }
+           }
+       }
+
+       cfg := map[string]any{
+           "Version":  "v1alpha1",
+           "Hostname": hostname,
+           "App":      kubetypes.AppProxyAPIServerProxy,
+       }
+       if authKey != "" {
+           cfg["AuthKey"] = authKey
+       }
+       cfgB, err := json.Marshal(cfg)
+       if err != nil {
+           return "", fmt.Errorf("error marshalling k8s-proxy config: %w", err)
+       }
+       mak.Set(&cfgSecret.Data, "config.json", cfgB)
+   } else {
+       configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret)
+       if err != nil {
+           return "", fmt.Errorf("error creating tailscaled config: %w", err)
+       }
+
+       for cap, cfg := range configs {
+           cfgJSON, err := json.Marshal(cfg)
+           if err != nil {
+               return "", fmt.Errorf("error marshalling tailscaled config: %w", err)
+           }
+           mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON)
+       }
+
+       // The config sha256 sum is a value for a hash annotation used to trigger
+       // pod restarts when tailscaled config changes. Any config changes apply
+       // to all replicas, so it is sufficient to only hash the config for the
+       // first replica.
+       //
+       // In future, we're aiming to eliminate restarts altogether and have
+       // pods dynamically reload their config when it changes.
+       if i == 0 {
+           sum := sha256.New()
+           for _, cfg := range configs {
+               // Zero out the auth key so it doesn't affect the sha256 hash when we
+               // remove it from the config after the pods have all authed. Otherwise
+               // all the pods will need to restart immediately after authing.
+               cfg.AuthKey = nil
+               b, err := json.Marshal(cfg)
+               if err != nil {
+                   return "", err
+               }
+               if _, err := sum.Write(b); err != nil {
+                   return "", err
+               }
+           }
+
+           configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil))
+       }
+   }

    if existingCfgSecret != nil {
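On the k8s-proxy side, the container locates its config through the TS_K8S_PROXY_CONFIG env var set in the StatefulSet below. The loader itself is not part of this diff; what follows is a hypothetical sketch of reading and version-gating the file, using only the keys the operator writes above. The configFile struct is a stand-in, not the real k8s-proxy type:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// configFile mirrors the keys ensureConfigSecretsCreated writes; the struct
// itself is an illustrative stand-in.
type configFile struct {
    Version  string
    Hostname string
    App      string
    AuthKey  string // empty once the device has authenticated
}

func main() {
    path := os.Getenv("TS_K8S_PROXY_CONFIG") // e.g. /etc/tsconfig/<pod>/config.json
    b, err := os.ReadFile(path)
    if err != nil {
        fmt.Fprintln(os.Stderr, "reading config:", err)
        os.Exit(1)
    }
    var cfg configFile
    if err := json.Unmarshal(b, &cfg); err != nil {
        fmt.Fprintln(os.Stderr, "parsing config:", err)
        os.Exit(1)
    }
    // Versioning within a single file: reject unknown future schema versions
    // explicitly rather than silently misreading them.
    if cfg.Version != "v1alpha1" {
        fmt.Fprintf(os.Stderr, "unsupported config version %q\n", cfg.Version)
        os.Exit(1)
    }
    fmt.Printf("starting %s as %s\n", cfg.App, cfg.Hostname)
}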
@@ -29,6 +29,9 @@ const deletionGracePeriodSeconds int64 = 360
 // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be
 // applied over the top after.
 func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) {
+   if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+       return kubeAPIServerStatefulSet(pg, namespace, image)
+   }
    ss := new(appsv1.StatefulSet)
    if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
        return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
@@ -225,9 +228,139 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
 		// gracefully.
 		ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds)
 	}
 
 	return ss, nil
 }
 
+func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) {
+	sts := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            pg.Name,
+			Namespace:       namespace,
+			Labels:          pgLabels(pg.Name, nil),
+			OwnerReferences: pgOwnerReference(pg),
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Replicas: ptr.To(pgReplicas(pg)),
+			Selector: &metav1.LabelSelector{
+				MatchLabels: pgLabels(pg.Name, nil),
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:                       pg.Name,
+					Namespace:                  namespace,
+					Labels:                     pgLabels(pg.Name, nil),
+					DeletionGracePeriodSeconds: ptr.To[int64](10),
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: pg.Name,
+					Containers: []corev1.Container{
+						{
+							Name:  "tailscale",
+							Image: image,
+							Env: []corev1.EnvVar{
+								{
+									Name: "POD_NAME",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "metadata.name",
+										},
+									},
+								},
+								{
+									Name: "POD_UID",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "metadata.uid",
+										},
+									},
+								},
+								{
+									Name:  "TS_INTERNAL_APP",
+									Value: kubetypes.AppProxyGroupKubeAPIServer,
+								},
+								{
+									Name:  "TS_STATE",
+									Value: "kube:$(POD_NAME)",
+								},
+								{
+									Name:  "TS_K8S_PROXY_CONFIG",
+									Value: "/etc/tsconfig/$(POD_NAME)/config.json",
+								},
+								{
+									Name:  "TS_DEBUG_ACME_DIRECTORY_URL",
+									Value: "https://acme-staging-v02.api.letsencrypt.org/directory",
+								},
+								{
+									Name:  "TS_DEBUG_ACME",
+									Value: "true",
+								},
+							},
+							VolumeMounts: func() []corev1.VolumeMount {
+								var mounts []corev1.VolumeMount
+
+								// TODO(tomhjp): Read config directly from the secret instead. The
+								// mounts change on scaling up/down which causes unnecessary restarts
+								// for pods that haven't meaningfully changed.
+								for i := range pgReplicas(pg) {
+									mounts = append(mounts, corev1.VolumeMount{
+										Name:      fmt.Sprintf("k8s-proxy-config-%d", i),
+										ReadOnly:  true,
+										MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i),
+									})
+								}
+
+								// mounts = append(mounts, corev1.VolumeMount{
+								// 	Name:      pgKubeAPIServerCMName(pg.Name),
+								// 	MountPath: "/etc/proxies",
+								// 	ReadOnly:  true,
+								// })
+
+								return mounts
+							}(),
+							Ports: []corev1.ContainerPort{
+								{
+									Name:          "k8s-proxy",
+									ContainerPort: 443,
+									Protocol:      corev1.ProtocolTCP,
+								},
+							},
+						},
+					},
+					Volumes: func() []corev1.Volume {
+						var volumes []corev1.Volume
+						for i := range pgReplicas(pg) {
+							volumes = append(volumes, corev1.Volume{
+								Name: fmt.Sprintf("k8s-proxy-config-%d", i),
+								VolumeSource: corev1.VolumeSource{
+									Secret: &corev1.SecretVolumeSource{
+										SecretName: pgConfigSecretName(pg.Name, i),
+									},
+								},
+							})
+						}
+
+						// volumes = append(volumes, corev1.Volume{
+						// 	Name: pgKubeAPIServerCMName(pg.Name),
+						// 	VolumeSource: corev1.VolumeSource{
+						// 		ConfigMap: &corev1.ConfigMapVolumeSource{
+						// 			LocalObjectReference: corev1.LocalObjectReference{
+						// 				Name: pgKubeAPIServerCMName(pg.Name),
+						// 			},
+						// 		},
+						// 	},
+						// })
+
+						return volumes
+					}(),
+				},
+			},
+		},
+	}
+
+	return sts, nil
+}
+
 func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount {
 	return &corev1.ServiceAccount{
 		ObjectMeta: metav1.ObjectMeta{
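A detail worth noting in the StatefulSet above: every replica mounts every replica's config Secret, and TS_K8S_PROXY_CONFIG relies on Kubernetes' dependent environment variable expansion of $(POD_NAME), so each pod resolves its own config file at runtime. A small sketch of how the paths line up (ProxyGroup name hypothetical):

package main

import "fmt"

func main() {
	const pgName = "test-k8s-apiserver" // hypothetical ProxyGroup name
	replicas := int32(2)

	// Mount paths created by kubeAPIServerStatefulSet, one per replica.
	for i := int32(0); i < replicas; i++ {
		fmt.Printf("mount %d: /etc/tsconfig/%s-%d\n", i, pgName, i)
	}

	// Kubernetes expands $(POD_NAME) in env var values (POD_NAME is defined
	// earlier in the same env list), so inside pod "test-k8s-apiserver-0"
	// the TS_K8S_PROXY_CONFIG value resolves to:
	fmt.Printf("resolved: /etc/tsconfig/%s-0/config.json\n", pgName)
}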
@@ -308,6 +441,44 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding {
 	}
 }
 
+func proxyClusterRole(pg *tsapi.ProxyGroup) *rbacv1.ClusterRole {
+	return &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            fmt.Sprintf("%s-impersonation", pg.Name),
+			Labels:          pgLabels(pg.Name, nil),
+			OwnerReferences: pgOwnerReference(pg),
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				APIGroups: []string{""},
+				Resources: []string{"users", "groups"},
+				Verbs:     []string{"impersonate"},
+			},
+		},
+	}
+}
+
+func proxyClusterRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.ClusterRoleBinding {
+	return &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            pg.Name,
+			Labels:          pgLabels(pg.Name, nil),
+			OwnerReferences: pgOwnerReference(pg),
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      pg.Name,
+				Namespace: namespace,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			Kind: "ClusterRole",
+			Name: fmt.Sprintf("%s-impersonation", pg.Name),
+		},
+	}
+}
+
 func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) {
 	for i := range pgReplicas(pg) {
 		secrets = append(secrets, &corev1.Secret{
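The impersonation ClusterRole above is what allows the proxy, when running in auth mode, to act on behalf of tailnet identities: it authenticates to the API server as its own ServiceAccount and sets impersonation headers for the mapped user and groups. A hedged client-go sketch of that mechanism, with hypothetical identity values (this only runs in-cluster, and is not the proxy's actual code path):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config authenticates as the pod's own ServiceAccount,
	// which must hold the "impersonate" verb on users and groups.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// Impersonate a tailnet-derived identity (values hypothetical); client-go
	// translates this into Impersonate-User/Impersonate-Group request headers.
	cfg.Impersonate = rest.ImpersonationConfig{
		UserName: "alice@example.com",
		Groups:   []string{"tailnet-readers"},
	}

	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := cs.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods visible to the impersonated user:", len(pods.Items))
}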
@@ -387,6 +558,10 @@ func pgEgressCMName(pg string) string {
 	return fmt.Sprintf("%s-egress-config", pg)
 }
 
+func pgKubeAPIServerCMName(pg string) string {
+	return fmt.Sprintf("%s-kube-apiserver-config", pg)
+}
+
 // hasLocalAddrPortSet returns true if the proxyclass has the TS_LOCAL_ADDR_PORT env var set. For egress ProxyGroups,
 // currently (2025-01-26) this means that the ProxyGroup does not support graceful failover.
 func hasLocalAddrPortSet(proxyClass *tsapi.ProxyClass) bool {
@@ -445,6 +445,41 @@ func TestProxyGroupTypes(t *testing.T) {
 			t.Errorf("unexpected volume mounts (-want +got):\n%s", diff)
 		}
 	})
+
+	t.Run("kubernetes_api_server_type", func(t *testing.T) {
+		pg := &tsapi.ProxyGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-k8s-apiserver",
+				UID:  "test-k8s-apiserver-uid",
+			},
+			Spec: tsapi.ProxyGroupSpec{
+				Type:     tsapi.ProxyGroupTypeKubernetesAPIServer,
+				Replicas: ptr.To[int32](1),
+			},
+		}
+		if err := fc.Create(context.Background(), pg); err != nil {
+			t.Fatal(err)
+		}
+
+		expectReconciled(t, reconciler, "", pg.Name)
+		verifyProxyGroupCounts(t, reconciler, 0, 0) // No ingress or egress counts for KubernetesAPIServer type.
+
+		sts := &appsv1.StatefulSet{}
+		if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
+			t.Fatalf("failed to get StatefulSet: %v", err)
+		}
+
+		// Verify the StatefulSet configuration for KubernetesAPIServer type.
+		if sts.Spec.Template.Spec.Containers[0].Name != "k8s-proxy" {
+			t.Errorf("unexpected container name %s, want k8s-proxy", sts.Spec.Template.Spec.Containers[0].Name)
+		}
+		if sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort != 443 {
+			t.Errorf("unexpected container port %d, want 443", sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort)
+		}
+		if sts.Spec.Template.Spec.Containers[0].Ports[0].Name != "https" {
+			t.Errorf("unexpected port name %s, want https", sts.Spec.Template.Spec.Containers[0].Ports[0].Name)
+		}
+	})
 }
 
 func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
@@ -60,7 +60,6 @@ type HAServiceReconciler struct {
 	recorder    record.EventRecorder
 	logger      *zap.SugaredLogger
 	tsClient    tsClient
-	tsnetServer tsnetServer
 	tsNamespace string
 	lc          localClient
 	defaultTags []string
@@ -186,7 +186,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
 	if err := fc.Status().Update(context.Background(), pg); err != nil {
 		t.Fatal(err)
 	}
-	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
 
 	ft := &fakeTSClient{}
 	zl, err := zap.NewDevelopment()
@@ -209,7 +208,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
 		clock:       cl,
 		defaultTags: []string{"tag:k8s"},
 		tsNamespace: "operator-ns",
-		tsnetServer: fakeTsnetServer,
 		logger:      zl.Sugar(),
 		recorder:    record.NewFakeRecorder(10),
 		lc:          lc,
cmd/k8s-proxy/internal/conf/conf.go (new file, 101 lines)
@@ -0,0 +1,101 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+// Package conf contains code to load, manipulate, and access config file
+// settings for k8s-proxy.
+package conf
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/tailscale/hujson"
+	"tailscale.com/types/opt"
+)
+
+// Config describes a config file.
+type Config struct {
+	Path    string // disk path of HuJSON
+	Raw     []byte // raw bytes from disk, in HuJSON form
+	Std     []byte // standardized JSON form
+	Version string // "v1alpha1"
+
+	// Parsed is the parsed config, converted from its on-disk version to the
+	// latest known format.
+	//
+	// As of 2023-10-15 there is exactly one format ("alpha0") so this is both
+	// the on-disk format and the in-memory upgraded format.
+	Parsed ConfigV1Alpha1
+}
+
+// VersionedConfig allows specifying config at the root of the object, or in
+// a versioned sub-object.
+// e.g. {"version": "v1alpha1", "authKey": "abc123"}
+// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}}
+type VersionedConfig struct {
+	Version string `json:",omitempty"` // "v1alpha1"
+
+	// Latest version of the config.
+	*ConfigV1Alpha1
+
+	// Backwards compatibility version(s) of the config.
+	V1Alpha1 *ConfigV1Alpha1 `json:",omitempty"`
+}
+
+type ConfigV1Alpha1 struct {
+	AuthKey       *string        `json:",omitempty"`
+	Hostname      *string        `json:",omitempty"`
+	LogLevel      *string        `json:",omitempty"` // "debug", "info", "warn", "error"
+	App           *string        `json:",omitempty"` // "k8s-proxy-api-server-proxy"
+	KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy.
+}
+
+type KubeAPIServer struct {
+	AuthMode opt.Bool `json:",omitempty"`
+}
+
+// Load reads and parses the config file at the provided path on disk.
+func Load(path string) (*Config, error) {
+	var c Config
+	c.Path = path
+	var err error
+
+	c.Raw, err = os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	c.Std, err = hujson.Standardize(c.Raw)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err)
+	}
+	var ver VersionedConfig
+	if err := json.Unmarshal(c.Std, &ver); err != nil {
+		return nil, fmt.Errorf("error parsing config file %s: %w", path, err)
+	}
+	rootV1Alpha1 := (ver.Version == "v1alpha1")
+	backCompatV1Alpha1 := (ver.V1Alpha1 != nil)
+	switch {
+	case ver.Version == "":
+		return nil, fmt.Errorf("error parsing config file %s: no \"version\" field provided", path)
+	case rootV1Alpha1 && backCompatV1Alpha1:
+		// Exactly one of these should be set.
+		return nil, fmt.Errorf("error parsing config file %s: both root and v1alpha1 config provided", path)
+	case rootV1Alpha1 != backCompatV1Alpha1:
+		c.Version = "v1alpha1"
+		switch {
+		case rootV1Alpha1 && ver.ConfigV1Alpha1 != nil:
+			c.Parsed = *ver.ConfigV1Alpha1
+		case backCompatV1Alpha1:
+			c.Parsed = *ver.V1Alpha1
+		default:
+			c.Parsed = ConfigV1Alpha1{}
+		}
+	default:
+		return nil, fmt.Errorf("error parsing config file %s: unsupported \"version\" value %q; want \"v1alpha1\"", path, ver.Version)
+	}
+
+	return &c, nil
+}
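To make the versioning scheme concrete, here is a hedged usage sketch (values hypothetical; note the package is internal to cmd/k8s-proxy, so a real caller would have to live under that tree). A config that declares a newer version at the root but nests a v1alpha1 sub-object still loads on a client that only understands v1alpha1:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"tailscale.com/cmd/k8s-proxy/internal/conf"
)

func main() {
	// Hypothetical config written by a future operator version: the root is
	// v1beta1, but a v1alpha1 sub-object is kept for older proxies. HuJSON
	// permits comments and unknown fields are ignored.
	input := `{
		"version":  "v1beta1",
		"beta-key": "beta-value", // unknown to a v1alpha1-only client
		"v1alpha1": {"hostname": "kube-apiserver-0", "kubeAPIServer": {"authMode": "true"}}
	}`

	path := filepath.Join(os.TempDir(), "config.json")
	if err := os.WriteFile(path, []byte(input), 0o600); err != nil {
		panic(err)
	}

	cfg, err := conf.Load(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Version)          // "v1alpha1": the version this client parsed
	fmt.Println(*cfg.Parsed.Hostname) // "kube-apiserver-0"
}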
cmd/k8s-proxy/internal/conf/conf_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package conf
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"tailscale.com/types/ptr"
+)
+
+// Test that the config file can be at the root of the object, or in a versioned sub-object.
+// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}}
+func TestVersionedConfig(t *testing.T) {
+	testCases := map[string]struct {
+		inputConfig    string
+		expectedConfig ConfigV1Alpha1
+		expectedError  string
+	}{
+		"root_config_v1alpha1": {
+			inputConfig:    `{"version": "v1alpha1", "authKey": "abc123"}`,
+			expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")},
+		},
+		"backwards_compat_v1alpha1_config": {
+			// Client doesn't know about v1beta1, so it should read in v1alpha1.
+			inputConfig:    `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456", "v1alpha1": {"authKey": "abc123"}}`,
+			expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")},
+		},
+		"unknown_key_allowed": {
+			// Adding new keys to the config doesn't require a version bump.
+			inputConfig:    `{"version": "v1alpha1", "unknown-key": "unknown-value", "authKey": "abc123"}`,
+			expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")},
+		},
+		"version_only_no_authkey": {
+			inputConfig:    `{"version": "v1alpha1"}`,
+			expectedConfig: ConfigV1Alpha1{},
+		},
+		"both_config_v1alpha1": {
+			inputConfig:   `{"version": "v1alpha1", "authKey": "abc123", "v1alpha1": {"authKey": "def456"}}`,
+			expectedError: "both root and v1alpha1 config provided",
+		},
+		"empty_config": {
+			inputConfig:   `{}`,
+			expectedError: `no "version" field provided`,
+		},
+		"v1beta1_without_backwards_compat": {
+			inputConfig:   `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456"}`,
+			expectedError: `unsupported "version" value "v1beta1"; want "v1alpha1"`,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			dir := t.TempDir()
+			path := filepath.Join(dir, "config.json")
+			if err := os.WriteFile(path, []byte(tc.inputConfig), 0644); err != nil {
+				t.Fatalf("failed to write config file: %v", err)
+			}
+			cfg, err := Load(path)
+			switch {
+			case tc.expectedError == "" && err != nil:
+				t.Fatalf("unexpected error: %v", err)
+			case tc.expectedError != "":
+				if err == nil {
+					t.Fatalf("expected error %q, got nil", tc.expectedError)
+				} else if !strings.Contains(err.Error(), tc.expectedError) {
+					t.Fatalf("expected error %q, got %q", tc.expectedError, err.Error())
+				}
+				return
+			}
+			if cfg.Version != "v1alpha1" {
+				t.Fatalf("expected version %q, got %q", "v1alpha1", cfg.Version)
+			}
+			// Diff actual vs expected config.
+			if diff := cmp.Diff(cfg.Parsed, tc.expectedConfig); diff != "" {
+				t.Fatalf("Unexpected parsed config (-got +want):\n%s", diff)
+			}
+		})
+	}
+}
@@ -15,13 +15,13 @@ import (
 
 	"github.com/go-logr/zapr"
 	"go.uber.org/zap/zapcore"
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
+	clientconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	kzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"tailscale.com/cmd/k8s-proxy/internal/conf"
 	"tailscale.com/hostinfo"
 	"tailscale.com/ipn/store/kubestore"
 	apiproxy "tailscale.com/k8s-operator/api-proxy"
-	"tailscale.com/kube/kubetypes"
 	"tailscale.com/tsnet"
 	"tailscale.com/types/logger"
 )
@@ -34,47 +34,75 @@ func main() {
 
 func run() error {
 	var (
 		podName    = os.Getenv("POD_NAME")
+		configFile = os.Getenv("TS_K8S_PROXY_CONFIG")
 	)
+
+	var cfg *conf.Config
+	if configFile != "" {
+		var err error
+		cfg, err = conf.Load(configFile)
+		if err != nil {
+			return fmt.Errorf("error loading config file %s: %w", configFile, err)
+		}
+	}
 	if podName == "" {
 		return fmt.Errorf("POD_NAME environment variable is not set")
 	}
 
 	var opts []kzap.Opts
-	switch "dev" { // TODO(tomhjp): make configurable
-	case "info":
-		opts = append(opts, kzap.Level(zapcore.InfoLevel))
-	case "debug":
-		opts = append(opts, kzap.Level(zapcore.DebugLevel))
-	case "dev":
-		opts = append(opts, kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel))
+	if cfg.Parsed.LogLevel != nil {
+		level, err := zapcore.ParseLevel(*cfg.Parsed.LogLevel)
+		if err != nil {
+			return fmt.Errorf("error parsing log level %q: %w", *cfg.Parsed.LogLevel, err)
+		}
+		opts = append(opts, kzap.Level(level))
 	}
 	zlog := kzap.NewRaw(opts...).Sugar()
 	logf.SetLogger(zapr.NewLogger(zlog.Desugar()))
-	hostinfo.SetApp(kubetypes.AppProxy) // TODO(tomhjp): Advertise auth/noauth as well?
+	if cfg.Parsed.App != nil {
+		hostinfo.SetApp(*cfg.Parsed.App)
+	}
+
+	authMode := true
+	if cfg.Parsed.KubeAPIServer != nil {
+		v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get()
+		if ok {
+			authMode = v
+		}
+	}
 
-	authMode := true // TODO(tomhjp): make configurable
 	st, err := kubestore.New(logger.Discard, podName)
 	if err != nil {
 		return fmt.Errorf("error creating kubestore: %w", err)
 	}
+
+	var authKey string
+	if cfg.Parsed.AuthKey != nil {
+		authKey = *cfg.Parsed.AuthKey
+	}
+
+	hostname := podName
+	if cfg.Parsed.Hostname != nil {
+		hostname = *cfg.Parsed.Hostname
+	}
+
 	ts := &tsnet.Server{
-		Hostname: podName, // TODO(tomhjp): make configurable
-		Logf:     zlog.Named("tailscaled").Debugf,
+		Hostname: hostname,
+		Logf:     zlog.Named("tsnet").Debugf,
 		Store:    st,
+		AuthKey:  authKey,
 	}
 	if _, err := ts.Up(context.Background()); err != nil {
 		return fmt.Errorf("error starting tailscale server: %v", err)
 	}
 	defer ts.Close()
 
-	restConfig, err := config.GetConfig()
+	restConfig, err := clientconfig.GetConfig()
 	if err != nil {
 		return fmt.Errorf("error getting kubeconfig: %w", err)
 	}
-	ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, ts, authMode)
+	ap, err := apiproxy.NewAPIServerProxy(zlog.Named("apiserver-proxy"), restConfig, ts, authMode)
 	if err != nil {
 		return fmt.Errorf("error creating api server proxy: %w", err)
 	}
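Putting the pieces together, the per-replica config file that k8s-proxy reads via TS_K8S_PROXY_CONFIG might look like the following sketch. The field values are hypothetical, and encoding/json's case-insensitive field matching is what lets lowercase keys populate conf.ConfigV1Alpha1's exported fields:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A hypothetical per-replica config, as the operator might write it into
	// the replica's config Secret and mount at the TS_K8S_PROXY_CONFIG path.
	cfg := map[string]any{
		"version":  "v1alpha1",
		"authKey":  "tskey-auth-REDACTED", // placeholder, not a real key
		"hostname": "test-k8s-apiserver-0",
		"app":      "k8s-proxy-api-server-proxy",
		"kubeAPIServer": map[string]any{
			"authMode": "true", // opt.Bool: "true", "false", or unset for the default
		},
	}
	b, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(b))
}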
@@ -600,7 +600,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.<br />Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress] <br />Type: string <br /> |
+| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.<br />Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress kube-apiserver] <br />Type: string <br /> |
 | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].<br />If you specify custom tags here, make sure you also make the operator<br />an owner of these tags.<br />See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.<br />Tags cannot be changed once a ProxyGroup device has been created.<br />Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` <br />Type: string <br /> |
 | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.<br />Defaults to 2. | | Minimum: 0 <br /> |
 | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created<br />by the ProxyGroup. Each device will have the integer number from its<br />StatefulSet pod appended to this prefix to form the full hostname.<br />HostnamePrefix can contain lower case letters, numbers and dashes, it<br />must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$` <br />Type: string <br /> |
@@ -631,7 +631,7 @@ _Underlying type:_ _string_
 
 
 _Validation:_
-- Enum: [egress ingress]
+- Enum: [egress ingress kube-apiserver]
 - Type: string
 
 _Appears in:_
@@ -114,12 +114,13 @@ type TailnetDevice struct {
 }
 
 // +kubebuilder:validation:Type=string
-// +kubebuilder:validation:Enum=egress;ingress
+// +kubebuilder:validation:Enum=egress;ingress;kube-apiserver
 type ProxyGroupType string
 
 const (
 	ProxyGroupTypeEgress  ProxyGroupType = "egress"
 	ProxyGroupTypeIngress ProxyGroupType = "ingress"
+	ProxyGroupTypeKubernetesAPIServer ProxyGroupType = "kube-apiserver"
 )
 
 // +kubebuilder:validation:Type=string
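With the new enum value in place, a kube-apiserver ProxyGroup is declared like any other ProxyGroup. A minimal sketch using the operator's typed API (object name hypothetical), mirroring the test fixture above; the type is immutable once created, per the CRD validation rule:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/types/ptr"
)

func main() {
	// A minimal kube-apiserver ProxyGroup; the operator deploys a StatefulSet
	// running the k8s-proxy image for it.
	pg := &tsapi.ProxyGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver-proxies"},
		Spec: tsapi.ProxyGroupSpec{
			Type:     tsapi.ProxyGroupTypeKubernetesAPIServer,
			Replicas: ptr.To[int32](2),
		},
	}
	fmt.Printf("%s: type=%s\n", pg.Name, pg.Spec.Type)
}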
@@ -5,21 +5,23 @@ package kubetypes
 
 const (
 	// Hostinfo App values for the Tailscale Kubernetes Operator components.
 	AppOperator = "k8s-operator"
-	AppProxy    = "k8s-proxy"
+	AppProxyAPIServerProxy = "k8s-proxy-api-server-proxy"
 	AppAPIServerProxy      = "k8s-operator-proxy"
 	AppIngressProxy        = "k8s-operator-ingress-proxy"
 	AppIngressResource     = "k8s-operator-ingress-resource"
 	AppEgressProxy         = "k8s-operator-egress-proxy"
 	AppConnector           = "k8s-operator-connector-resource"
 	AppProxyGroupEgress    = "k8s-operator-proxygroup-egress"
 	AppProxyGroupIngress   = "k8s-operator-proxygroup-ingress"
+	AppProxyGroupKubeAPIServer = "k8s-operator-proxygroup-kube-apiserver"
 
 	// Clientmetrics for Tailscale Kubernetes Operator components
 	MetricIngressProxyCount              = "k8s_ingress_proxies"      // L3
 	MetricIngressResourceCount           = "k8s_ingress_resources"    // L7
 	MetricIngressPGResourceCount         = "k8s_ingress_pg_resources" // L7 on ProxyGroup
 	MetricServicePGResourceCount         = "k8s_service_pg_resources" // L3 on ProxyGroup
+	MetricAPIServerProxyPGResourceCount  = "k8s_api_server_proxy_pg_resources"
 	MetricEgressProxyCount               = "k8s_egress_proxies"
 	MetricConnectorResourceCount         = "k8s_connector_resources"
 	MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"