tailscale/cmd/k8s-operator/nameserver.go

cmd/{k8s-nameserver,k8s-operator},k8s-operator: add a kube nameserver, make operator deploy it (#11017)

* cmd/k8s-nameserver,k8s-operator: add a nameserver that can resolve ts.net DNS names in cluster.

  Adds a simple nameserver that can respond to A record queries for ts.net DNS names.
  It answers queries from in-memory records, populated from a ConfigMap mounted at
  /config, and dynamically updates its records as the ConfigMap contents change. It
  responds with NXDOMAIN to queries for any other record types (AAAA is to be
  implemented in the future). It can respond to queries over UDP or TCP. It runs a
  miekg/dns DNS server with a single registered handler for ts.net domain names;
  queries for other domain names are refused.

  The intended use of this is:
  1) to allow non-tailnet cluster workloads to talk to HTTPS tailnet services exposed
     via Tailscale operator egress over HTTPS
  2) to allow non-tailnet cluster workloads to talk to workloads in the same cluster
     that have been exposed to the tailnet over their MagicDNS names but on their
     cluster IPs.

  Updates tailscale/tailscale#10499

* cmd/k8s-operator/deploy/crds,k8s-operator: add DNSConfig CustomResourceDefinition

  The DNSConfig CRD can be used to configure the operator to deploy the kube
  nameserver (./cmd/k8s-nameserver) to the cluster.

* cmd/k8s-operator,k8s-operator: optionally reconcile nameserver resources

  Adds a new reconciler that reconciles DNSConfig resources. If a DNSConfig is
  deployed to the cluster, the reconciler creates kube nameserver resources. This
  reconciler is only responsible for creating nameserver resources, not for
  populating the nameserver's records.

* cmd/{k8s-operator,k8s-nameserver}: generate DNSConfig CRD for charts, append to static manifests

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
2024-03-27 20:18:17 +00:00
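
To make the intended workflow concrete, here is a minimal sketch of creating a DNSConfig from Go with a controller-runtime client. Only the fields this reconciler actually reads (Spec.Nameserver.Image.Repo and Spec.Nameserver.Image.Tag) are shown; the nested type names NameserverSpec and ImageSpec, and the repo/tag values, are hypothetical placeholders rather than the real tsapi API names. The equivalent kubectl manifest would presumably set spec.nameserver.image.repo and .tag.

// Hypothetical sketch: nested type names and image values are illustrative only.
dnsCfg := &tsapi.DNSConfig{
	ObjectMeta: metav1.ObjectMeta{Name: "ts-dns"},
	Spec: tsapi.DNSConfigSpec{
		Nameserver: tsapi.NameserverSpec{ // hypothetical type name
			Image: tsapi.ImageSpec{ // hypothetical type name
				Repo: "tailscale/k8s-nameserver", // placeholder
				Tag:  "unstable",                 // placeholder
			},
		},
	},
}
// c is a controller-runtime client.Client with the tsapi scheme registered.
if err := c.Create(ctx, dnsCfg); err != nil {
	// handle error
}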
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
// tailscale-operator provides a way to expose services running in a Kubernetes
// cluster to your Tailnet and to make Tailscale nodes available to cluster
// workloads
package main
import (
"context"
"fmt"
"slices"
"sync"
_ "embed"
"github.com/pkg/errors"
"go.uber.org/zap"
xslices "golang.org/x/exp/slices"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"
tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/tstime"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
)
const (
reasonNameserverCreationFailed = "NameserverCreationFailed"
reasonMultipleDNSConfigsPresent = "MultipleDNSConfigsPresent"
reasonNameserverCreated = "NameserverCreated"
messageNameserverCreationFailed = "Failed creating nameserver resources: %v"
messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present."
// Default nameserver image, used when the DNSConfig does not specify one.
// (Assumed defaults; they can be overridden via the DNSConfig spec.)
defaultNameserverImageRepo = "tailscale/k8s-nameserver"
defaultNameserverImageTag = "unstable"
)
// NameserverReconciler knows how to create nameserver resources in the cluster
// in response to a user applying a DNSConfig resource.
type NameserverReconciler struct {
client.Client
logger *zap.SugaredLogger
recorder record.EventRecorder
clock tstime.Clock
tsNamespace string
mu sync.Mutex // protects following
managedNameservers set.Slice[types.UID] // one or none
}
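// gaugeNameserverResources tracks the number of nameserver resource sets
// (one or none) currently managed by this operator instance.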
var (
gaugeNameserverResources = clientmetric.NewGauge("k8s_nameserver_resources")
)
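// Reconcile brings cluster state in line with the given DNSConfig: it ensures
// that nameserver resources (ServiceAccount, ConfigMap, Deployment, Service)
// exist for a live DNSConfig, cleans up tracking when the DNSConfig is
// deleted, and records the nameserver Service's ClusterIP in the DNSConfig
// status once one has been allocated.
//
// A minimal sketch of how this reconciler might be registered with a
// controller-runtime manager (the actual wiring lives elsewhere in the
// operator and may differ; mgr, zlog and tsNamespace are assumed to be in
// scope):
//
//	err := builder.ControllerManagedBy(mgr).
//		For(&tsapi.DNSConfig{}).
//		Complete(&NameserverReconciler{
//			Client:      mgr.GetClient(),
//			logger:      zlog.Named("nameserver-reconciler"),
//			recorder:    mgr.GetEventRecorderFor("nameserver-reconciler"),
//			clock:       tstime.DefaultClock{},
//			tsNamespace: tsNamespace,
//		})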
func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
logger := a.logger.With("dnsConfig", req.Name)
logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished")
var dnsCfg tsapi.DNSConfig
err = a.Get(ctx, req.NamespacedName, &dnsCfg)
if apierrors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
logger.Debugf("dnsconfig not found, assuming it was deleted")
return reconcile.Result{}, nil
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get dnsconfig: %w", err)
}
if !dnsCfg.DeletionTimestamp.IsZero() {
ix := xslices.Index(dnsCfg.Finalizers, FinalizerName)
if ix < 0 {
logger.Debugf("no finalizer, nothing to do")
return reconcile.Result{}, nil
}
logger.Info("Cleaning up DNSConfig resources")
if err := a.maybeCleanup(ctx, &dnsCfg, logger); err != nil {
logger.Errorf("error cleaning up reconciler resource: %v", err)
return res, err
}
dnsCfg.Finalizers = append(dnsCfg.Finalizers[:ix], dnsCfg.Finalizers[ix+1:]...)
if err := a.Update(ctx, &dnsCfg); err != nil {
logger.Errorf("error removing finalizer: %v", err)
return reconcile.Result{}, err
}
logger.Infof("Nameserver resources cleaned up")
return reconcile.Result{}, nil
}
oldCnStatus := dnsCfg.Status.DeepCopy()
setStatus := func(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConnectorConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetDNSConfigCondition(dnsCfg, conditionType, status, reason, message, dnsCfg.Generation, a.clock, logger)
if !apiequality.Semantic.DeepEqual(oldCnStatus, dnsCfg.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil {
if err == nil {
err = updateErr
} else {
err = errors.Wrap(err, updateErr.Error())
}
}
}
return res, err
}
var dnsCfgs tsapi.DNSConfigList
if err := a.List(ctx, &dnsCfgs); err != nil {
return res, fmt.Errorf("error listing DNSConfigs: %w", err)
}
if len(dnsCfgs.Items) > 1 { // enforce DNSConfig to be a singleton
msg := "invalid cluster configuration: more than one tailscale.com/dnsconfigs found. Please ensure that no more than one is created."
logger.Error(msg)
a.recorder.Event(&dnsCfg, corev1.EventTypeWarning, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
// Do not provision nameserver resources while more than one DNSConfig is present.
return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
}
if !slices.Contains(dnsCfg.Finalizers, FinalizerName) {
logger.Infof("ensuring nameserver resources")
dnsCfg.Finalizers = append(dnsCfg.Finalizers, FinalizerName)
if err := a.Update(ctx, &dnsCfg); err != nil {
msg := fmt.Sprintf(messageNameserverCreationFailed, err)
logger.Error(msg)
return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonNameserverCreationFailed, msg)
}
}
if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil {
return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err)
}
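// Provisioning succeeded: record this DNSConfig as a managed nameserver and
// update the resource-count metric.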
a.mu.Lock()
a.managedNameservers.Add(dnsCfg.UID)
a.mu.Unlock()
gaugeNameserverResources.Set(int64(a.managedNameservers.Len()))
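// Surface the ClusterIP of the nameserver Service in the DNSConfig status,
// so that users (or other controllers) can point cluster DNS at it.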
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: a.tsNamespace},
}
if err := a.Client.Get(ctx, client.ObjectKeyFromObject(svc), svc); err != nil {
return res, fmt.Errorf("error getting Service: %w", err)
}
if ip := svc.Spec.ClusterIP; ip != "" && ip != "None" {
dnsCfg.Status.NameserverStatus = &tsapi.NameserverStatus{
IP: ip,
}
return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)
}
logger.Info("nameserver Service does not have an IP address allocated, waiting...")
return reconcile.Result{}, nil
}
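// nameserverResourceLabels returns the labels applied to all nameserver
// resources created for the given DNSConfig: the operator's usual
// child-resource labels plus app.kubernetes.io/name and
// app.kubernetes.io/component.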
func nameserverResourceLabels(name, namespace string) map[string]string {
labels := childResourceLabels(name, namespace, "nameserver")
labels["app.kubernetes.io/name"] = "tailscale"
labels["app.kubernetes.io/component"] = "nameserver"
return labels
}
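// maybeProvision ensures that nameserver resources (ServiceAccount,
// Deployment, Service and ConfigMap) exist in the operator's namespace and
// are owned by the given DNSConfig. The nameserver image defaults to
// defaultNameserverImageRepo:defaultNameserverImageTag unless overridden via
// the DNSConfig spec.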
func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error {
labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace)
dCfg := &deployConfig{
ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))},
namespace: a.tsNamespace,
labels: labels,
// Default nameserver image; overridden below if the DNSConfig specifies one.
imageRepo: defaultNameserverImageRepo,
imageTag: defaultNameserverImageTag,
}
if tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo
}
if tsDNSCfg.Spec.Nameserver.Image.Tag != "" {
dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag
}
for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} {
if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil {
return fmt.Errorf("error reconciling %s: %w", deployable.kind, err)
}
}
return nil
}
// maybeCleanup stops tracking the given DNSConfig. The cluster resources
// created for it will be garbage collected automatically, as they are owned
// by the DNSConfig.
func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error {
a.mu.Lock()
a.managedNameservers.Remove(dnsCfg.UID)
a.mu.Unlock()
gaugeNameserverResources.Set(int64(a.managedNameservers.Len()))
return nil
}
type deployable struct {
kind string
updateObj func(context.Context, *deployConfig, client.Client) error
}
type deployConfig struct {
imageRepo string
imageTag string
labels map[string]string
ownerRefs []metav1.OwnerReference
namespace string
}
var (
//go:embed deploy/manifests/nameserver/cm.yaml
cmYaml []byte
//go:embed deploy/manifests/nameserver/deploy.yaml
deployYaml []byte
//go:embed deploy/manifests/nameserver/sa.yaml
saYaml []byte
//go:embed deploy/manifests/nameserver/svc.yaml
svcYaml []byte
deployDeployable = deployable{
kind: "Deployment",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
d := new(appsv1.Deployment)
if err := yaml.Unmarshal(deployYaml, &d); err != nil {
return fmt.Errorf("error unmarshalling Deployment yaml: %w", err)
}
d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag)
d.ObjectMeta.Namespace = cfg.namespace
d.ObjectMeta.Labels = cfg.labels
d.ObjectMeta.OwnerReferences = cfg.ownerRefs
updateF := func(oldD *appsv1.Deployment) {
oldD.Spec = d.Spec
}
_, err := createOrUpdate[appsv1.Deployment](ctx, kubeClient, cfg.namespace, d, updateF)
return err
},
}
saDeployable = deployable{
kind: "ServiceAccount",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
sa := new(corev1.ServiceAccount)
if err := yaml.Unmarshal(saYaml, &sa); err != nil {
return fmt.Errorf("error unmarshalling ServiceAccount yaml: %w", err)
}
sa.ObjectMeta.Labels = cfg.labels
sa.ObjectMeta.OwnerReferences = cfg.ownerRefs
sa.ObjectMeta.Namespace = cfg.namespace
_, err := createOrUpdate(ctx, kubeClient, cfg.namespace, sa, func(*corev1.ServiceAccount) {})
return err
},
}
svcDeployable = deployable{
kind: "Service",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
svc := new(corev1.Service)
if err := yaml.Unmarshal(svcYaml, &svc); err != nil {
return fmt.Errorf("error unmarshalling Service yaml: %w", err)
}
svc.ObjectMeta.Labels = cfg.labels
svc.ObjectMeta.OwnerReferences = cfg.ownerRefs
svc.ObjectMeta.Namespace = cfg.namespace
_, err := createOrUpdate[corev1.Service](ctx, kubeClient, cfg.namespace, svc, func(*corev1.Service) {})
return err
},
}
cmDeployable = deployable{
kind: "ConfigMap",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
cm := new(corev1.ConfigMap)
if err := yaml.Unmarshal(cmYaml, &cm); err != nil {
return fmt.Errorf("error unmarshalling ConfigMap yaml: %w", err)
}
cm.ObjectMeta.Labels = cfg.labels
cm.ObjectMeta.OwnerReferences = cfg.ownerRefs
cm.ObjectMeta.Namespace = cfg.namespace
_, err := createOrUpdate[corev1.ConfigMap](ctx, kubeClient, cfg.namespace, cm, func(cm *corev1.ConfigMap) {})
return err
},
}
)
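// The deployables above all follow the same pattern: unmarshal an embedded
// static manifest, stamp it with the namespace, labels and owner references
// from the deployConfig, and createOrUpdate it in the cluster. A new
// nameserver resource type would be added the same way. A minimal sketch,
// assuming a hypothetical embedded manifest
// deploy/manifests/nameserver/role.yaml and the rbacv1 ("k8s.io/api/rbac/v1")
// import:
//
//	//go:embed deploy/manifests/nameserver/role.yaml
//	var roleYaml []byte
//
//	var roleDeployable = deployable{
//		kind: "Role",
//		updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
//			role := new(rbacv1.Role)
//			if err := yaml.Unmarshal(roleYaml, &role); err != nil {
//				return fmt.Errorf("error unmarshalling Role yaml: %w", err)
//			}
//			role.ObjectMeta.Labels = cfg.labels
//			role.ObjectMeta.OwnerReferences = cfg.ownerRefs
//			role.ObjectMeta.Namespace = cfg.namespace
//			_, err := createOrUpdate[rbacv1.Role](ctx, kubeClient, cfg.namespace, role, func(*rbacv1.Role) {})
//			return err
//		},
//	}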