cmd/{k8s-operator,containerboot},kube/kubetypes: parse Ingresses for ingress ProxyGroup (#14583)
cmd/k8s-operator: add logic to parse L7 Ingresses in HA mode

- Wrap the Tailscale API client used by the Kubernetes Operator into a client that knows how to manage VIPServices.
- Create/Delete VIPServices and update serve config for L7 Ingresses for ProxyGroup.
- Ensure that ingress ProxyGroup proxies mount serve config from a shared ConfigMap.

Updates tailscale/corp#24795

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
Parent: 69a985fb1e
Commit: 817ba1c300
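For context, a minimal sketch of the kind of Ingress this change targets: the tailscale IngressClass plus the tailscale.com/proxy-group annotation. This is constructed with the Kubernetes API types the operator uses; the names mirror the test fixtures later in this commit and are illustrative only.

ing := &networkingv1.Ingress{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "test-ingress",
		Namespace: "default",
		// Expose this Ingress in HA mode on the "test-pg" ingress ProxyGroup.
		Annotations: map[string]string{"tailscale.com/proxy-group": "test-pg"},
	},
	Spec: networkingv1.IngressSpec{
		IngressClassName: ptr.To("tailscale"),
		DefaultBackend: &networkingv1.IngressBackend{
			Service: &networkingv1.IngressServiceBackend{
				Name: "test",
				Port: networkingv1.ServiceBackendPort{Number: 8080},
			},
		},
		// Optional: a single TLS entry with a single host sets the MagicDNS hostname ("my-svc").
		TLS: []networkingv1.IngressTLS{{Hosts: []string{"my-svc.tailnetxyz.ts.net"}}},
	},
}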
@@ -65,6 +65,10 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan
 		if err != nil {
 			log.Fatalf("serve proxy: failed to read serve config: %v", err)
 		}
+		if sc == nil {
+			log.Printf("serve proxy: no serve config at %q, skipping", path)
+			continue
+		}
 		if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) {
 			continue
 		}
@@ -131,6 +135,9 @@ func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) {
 	}
 	j, err := os.ReadFile(path)
 	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
 		return nil, err
 	}
 	// Serve config can be provided by users as well as the Kubernetes Operator (for its proxies). User-provided
cmd/k8s-operator/ingress-for-pg.go (new file, 567 lines)
@@ -0,0 +1,567 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"reflect"
	"slices"
	"strings"
	"sync"

	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	tsoperator "tailscale.com/k8s-operator"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/kube/kubetypes"
	"tailscale.com/tailcfg"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/dnsname"
	"tailscale.com/util/mak"
	"tailscale.com/util/set"
)

const (
	serveConfigKey = "serve-config.json"
	VIPSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s"
	// FinalizerNamePG is the finalizer used by the IngressPGReconciler
	FinalizerNamePG = "tailscale.com/ingress-pg-finalizer"
)

var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount)

// IngressPGReconciler is a controller that reconciles Tailscale Ingresses that should be exposed on an ingress
// ProxyGroup (in HA mode).
type IngressPGReconciler struct {
	client.Client

	recorder    record.EventRecorder
	logger      *zap.SugaredLogger
	tsClient    tsClient
	tsnetServer tsnetServer
	tsNamespace string
	lc          localClient
	defaultTags []string

	mu sync.Mutex // protects following
	// managedIngresses is a set of all ingress resources that we're currently
	// managing. This is only used for metrics.
	managedIngresses set.Slice[types.UID]
}

// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA mode (on a ProxyGroup). It looks at all
// Ingresses with the tailscale.com/proxy-group annotation. For each such Ingress, it ensures that a VIPService named
// after the hostname of the Ingress exists and is up to date. It also ensures that the serve config for the ingress
// ProxyGroup is updated to route traffic for the VIPService to the Ingress's backend Services.
// When an Ingress is deleted or unexposed, the VIPService and the associated serve config are cleaned up.
// An Ingress hostname change also results in the VIPService for the previous hostname being cleaned up and a new
// VIPService being created for the new hostname.
func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
	logger := a.logger.With("Ingress", req.NamespacedName)
	logger.Debugf("starting reconcile")
	defer logger.Debugf("reconcile finished")

	ing := new(networkingv1.Ingress)
	err = a.Get(ctx, req.NamespacedName, ing)
	if apierrors.IsNotFound(err) {
		// Request object not found, could have been deleted after reconcile request.
		logger.Debugf("Ingress not found, assuming it was deleted")
		return res, nil
	} else if err != nil {
		return res, fmt.Errorf("failed to get Ingress: %w", err)
	}

	// hostname is the name of the VIPService that will be created for this Ingress as well as the first label in
	// the MagicDNS name of the Ingress.
	hostname := hostnameForIngress(ing)
	logger = logger.With("hostname", hostname)

	if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) {
		return res, a.maybeCleanup(ctx, hostname, ing, logger)
	}

	if err := a.maybeProvision(ctx, hostname, ing, logger); err != nil {
		return res, fmt.Errorf("failed to provision: %w", err)
	}
	return res, nil
}

// maybeProvision ensures that the VIPService and serve config for the Ingress are created or updated.
func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
	if err := validateIngressClass(ctx, a.Client); err != nil {
		logger.Infof("error validating tailscale IngressClass: %v", err)
		return nil
	}

	// Get and validate ProxyGroup readiness
	pgName := ing.Annotations[AnnotationProxyGroup]
	if pgName == "" {
		logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning")
		return nil
	}
	pg := &tsapi.ProxyGroup{}
	if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil {
		if apierrors.IsNotFound(err) {
			logger.Infof("ProxyGroup %q does not exist", pgName)
			return nil
		}
		return fmt.Errorf("getting ProxyGroup %q: %w", pgName, err)
	}
	if !tsoperator.ProxyGroupIsReady(pg) {
		// TODO(irbekrm): we need to reconcile ProxyGroup Ingresses on ProxyGroup changes to not miss the status update
		// in this case.
		logger.Infof("ProxyGroup %q is not ready", pgName)
		return nil
	}

	// Validate Ingress configuration
	if err := a.validateIngress(ing, pg); err != nil {
		logger.Infof("invalid Ingress configuration: %v", err)
		a.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error())
		return nil
	}

	if !IsHTTPSEnabledOnTailnet(a.tsnetServer) {
		a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work")
	}

	logger = logger.With("proxy-group", pg)

	if !slices.Contains(ing.Finalizers, FinalizerNamePG) {
		// This log line is printed exactly once during initial provisioning,
		// because once the finalizer is in place this block gets skipped. So,
		// this is a nice place to tell the operator that the high level,
		// multi-reconcile operation is underway.
		logger.Infof("exposing Ingress over tailscale")
		ing.Finalizers = append(ing.Finalizers, FinalizerNamePG)
		if err := a.Update(ctx, ing); err != nil {
			return fmt.Errorf("failed to add finalizer: %w", err)
		}
		a.mu.Lock()
		a.managedIngresses.Add(ing.UID)
		gaugePGIngressResources.Set(int64(a.managedIngresses.Len()))
		a.mu.Unlock()
	}

	// 1. Ensure that if the Ingress's hostname has changed, any VIPService resources corresponding to the old
	// hostname are cleaned up.
	// In practice, this function ensures that any VIPServices that are associated with the provided ProxyGroup
	// and no longer owned by an Ingress are cleaned up. This is fine: it is not expensive, and it ensures that in
	// edge cases (a single update that both changed the hostname and removed the ProxyGroup annotation) the
	// VIPService is more likely to be (eventually) removed.
	if err := a.maybeCleanupProxyGroup(ctx, pgName, logger); err != nil {
		return fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err)
	}

	// 2. Ensure that there isn't a VIPService with the same hostname already created and not owned by this Ingress.
	// TODO(irbekrm): perhaps in future we could store record names on VIPServices. I am not certain whether there
	// might be edge cases (custom domains, etc.) where determining the DNS name of the VIPService in this way
	// would be incorrect.
	tcd, err := a.tailnetCertDomain(ctx)
	if err != nil {
		return fmt.Errorf("error determining DNS name base: %w", err)
	}
	dnsName := hostname + "." + tcd
	existingVIPSvc, err := a.tsClient.getVIPServiceByName(ctx, hostname)
	// TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore
	// should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET
	// here will fail.
	if err != nil {
		errResp := &tailscale.ErrResponse{}
		if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound {
			return fmt.Errorf("error getting VIPService %q: %w", hostname, err)
		}
	}
	if existingVIPSvc != nil && !isVIPServiceForIngress(existingVIPSvc, ing) {
		logger.Infof("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually and recreate this Ingress to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName)
		a.recorder.Event(ing, corev1.EventTypeWarning, "ConflictingVIPServiceExists", fmt.Sprintf("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName))
		return nil
	}

	// 3. Ensure that the serve config for the ProxyGroup contains the VIPService
	cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName)
	if err != nil {
		return fmt.Errorf("error getting ingress serve config: %w", err)
	}
	if cm == nil {
		logger.Infof("no ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.")
		return nil
	}
	ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName))
	handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger)
	if err != nil {
		return fmt.Errorf("failed to get handlers for ingress: %w", err)
	}
	ingCfg := &ipn.ServiceConfig{
		TCP: map[uint16]*ipn.TCPPortHandler{
			443: {
				HTTPS: true,
			},
		},
		Web: map[ipn.HostPort]*ipn.WebServerConfig{
			ep: {
				Handlers: handlers,
			},
		},
	}
	var gotCfg *ipn.ServiceConfig
	if cfg != nil && cfg.Services != nil {
		gotCfg = cfg.Services[hostname]
	}
	if !reflect.DeepEqual(gotCfg, ingCfg) {
		logger.Infof("Updating serve config")
		mak.Set(&cfg.Services, hostname, ingCfg)
		cfgBytes, err := json.Marshal(cfg)
		if err != nil {
			return fmt.Errorf("error marshaling serve config: %w", err)
		}
		mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes)
		if err := a.Update(ctx, cm); err != nil {
			return fmt.Errorf("error updating serve config: %w", err)
		}
	}

	// 4. Ensure that the VIPService exists and is up to date.
	tags := a.defaultTags
	if tstr, ok := ing.Annotations[AnnotationTags]; ok {
		tags = strings.Split(tstr, ",")
	}

	vipSvc := &VIPService{
		Name:    hostname,
		Tags:    tags,
		Ports:   []string{"443"}, // always 443 for Ingress
		Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID),
	}
	if existingVIPSvc != nil {
		vipSvc.Addrs = existingVIPSvc.Addrs
	}
	if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) {
		logger.Infof("Ensuring VIPService %q exists and is up to date", hostname)
		if err := a.tsClient.createOrUpdateVIPServiceByName(ctx, vipSvc); err != nil {
			logger.Infof("error creating VIPService: %v", err)
			return fmt.Errorf("error creating VIPService: %w", err)
		}
	}

	// 5. Update Ingress status
	oldStatus := ing.Status.DeepCopy()
	// TODO(irbekrm): once we have ingress ProxyGroup, we can determine if instances are ready to route traffic to the VIPService
	ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
		{
			Hostname: dnsName,
			Ports: []networkingv1.IngressPortStatus{
				{
					Protocol: "TCP",
					Port:     443,
				},
			},
		},
	}
	if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) {
		return nil
	}
	if err := a.Status().Update(ctx, ing); err != nil {
		return fmt.Errorf("failed to update Ingress status: %w", err)
	}
	return nil
}

// maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the
// Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any
// VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up.
func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) error {
	// Get serve config for the ProxyGroup
	cm, cfg, err := a.proxyGroupServeConfig(ctx, proxyGroupName)
	if err != nil {
		return fmt.Errorf("getting serve config: %w", err)
	}
	if cfg == nil {
		return nil // ProxyGroup does not have any VIPServices
	}

	ingList := &networkingv1.IngressList{}
	if err := a.List(ctx, ingList); err != nil {
		return fmt.Errorf("listing Ingresses: %w", err)
	}
	serveConfigChanged := false
	// For each VIPService in serve config...
	for vipHostname := range cfg.Services {
		// ...check if there is currently an Ingress with this hostname
		found := false
		for _, i := range ingList.Items {
			ingressHostname := hostnameForIngress(&i)
			if ingressHostname == vipHostname {
				found = true
				break
			}
		}

		if !found {
			logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipHostname)
			svc, err := a.getVIPService(ctx, vipHostname, logger)
			if err != nil {
				errResp := &tailscale.ErrResponse{}
				if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound {
					delete(cfg.Services, vipHostname)
					serveConfigChanged = true
					continue
				}
				return err
			}
			if isVIPServiceForAnyIngress(svc) {
				logger.Infof("cleaning up orphaned VIPService %q", vipHostname)
				if err := a.tsClient.deleteVIPServiceByName(ctx, vipHostname); err != nil {
					errResp := &tailscale.ErrResponse{}
					if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound {
						return fmt.Errorf("deleting VIPService %q: %w", vipHostname, err)
					}
				}
			}
			delete(cfg.Services, vipHostname)
			serveConfigChanged = true
		}
	}

	if serveConfigChanged {
		cfgBytes, err := json.Marshal(cfg)
		if err != nil {
			return fmt.Errorf("marshaling serve config: %w", err)
		}
		mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes)
		if err := a.Update(ctx, cm); err != nil {
			return fmt.Errorf("updating serve config: %w", err)
		}
	}
	return nil
}

// maybeCleanup ensures that any resources, such as a VIPService created for this Ingress, are cleaned up when the
// Ingress is being deleted or is unexposed.
func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
	logger.Debugf("Ensuring any resources for Ingress are cleaned up")
	ix := slices.Index(ing.Finalizers, FinalizerNamePG)
	if ix < 0 {
		logger.Debugf("no finalizer, nothing to do")
		a.mu.Lock()
		defer a.mu.Unlock()
		a.managedIngresses.Remove(ing.UID)
		gaugePGIngressResources.Set(int64(a.managedIngresses.Len()))
		return nil
	}

	// 1. Check if there is a VIPService created for this Ingress.
	pg := ing.Annotations[AnnotationProxyGroup]
	cm, cfg, err := a.proxyGroupServeConfig(ctx, pg)
	if err != nil {
		return fmt.Errorf("error getting ProxyGroup serve config: %w", err)
	}
	// VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not
	// found in the serve config, we can assume that there is no VIPService. TODO(irbekrm): once we have ingress
	// ProxyGroup, we will probably add currently exposed VIPServices to its status. At that point, we can use the
	// status rather than checking the serve config each time.
	if cfg == nil || cfg.Services == nil || cfg.Services[hostname] == nil {
		return nil
	}
	logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname)

	// 2. Delete the VIPService.
	if err := a.deleteVIPServiceIfExists(ctx, hostname, ing, logger); err != nil {
		return fmt.Errorf("error deleting VIPService: %w", err)
	}

	// 3. Remove the VIPService from the serve config for the ProxyGroup.
	logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg)
	delete(cfg.Services, hostname)
	cfgBytes, err := json.Marshal(cfg)
	if err != nil {
		return fmt.Errorf("error marshaling serve config: %w", err)
	}
	mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes)
	if err := a.Update(ctx, cm); err != nil {
		return fmt.Errorf("error updating ConfigMap %q: %w", cm.Name, err)
	}

	if err := a.deleteFinalizer(ctx, ing, logger); err != nil {
		return fmt.Errorf("failed to remove finalizer: %w", err)
	}
	a.mu.Lock()
	defer a.mu.Unlock()
	a.managedIngresses.Remove(ing.UID)
	gaugePGIngressResources.Set(int64(a.managedIngresses.Len()))
	return nil
}

func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
	found := false
	ing.Finalizers = slices.DeleteFunc(ing.Finalizers, func(f string) bool {
		if f == FinalizerNamePG {
			found = true
			return true
		}
		return false
	})
	if !found {
		return nil
	}
	logger.Debugf("ensuring %q finalizer is removed", FinalizerNamePG)

	if err := a.Update(ctx, ing); err != nil {
		return fmt.Errorf("failed to remove finalizer %q: %w", FinalizerNamePG, err)
	}
	return nil
}

func pgIngressCMName(pg string) string {
	return fmt.Sprintf("%s-ingress-config", pg)
}

func (a *IngressPGReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm *corev1.ConfigMap, cfg *ipn.ServeConfig, err error) {
	name := pgIngressCMName(pg)
	cm = &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: a.tsNamespace,
		},
	}
	err = a.Get(ctx, client.ObjectKeyFromObject(cm), cm)
	if apierrors.IsNotFound(err) {
		return nil, nil, nil
	}
	if err != nil {
		return nil, nil, fmt.Errorf("error retrieving ingress serve config ConfigMap %s: %v", name, err)
	}
	cfg = &ipn.ServeConfig{}
	if len(cm.BinaryData[serveConfigKey]) != 0 {
		if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil {
			return nil, nil, fmt.Errorf("error unmarshaling ingress serve config %v: %w", cm.BinaryData[serveConfigKey], err)
		}
	}
	return cm, cfg, nil
}

type localClient interface {
	StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error)
}

// tailnetCertDomain returns the base domain (TCD) of the current tailnet.
func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, error) {
	st, err := a.lc.StatusWithoutPeers(ctx)
	if err != nil {
		return "", fmt.Errorf("error getting tailscale status: %w", err)
	}
	return st.CurrentTailnet.MagicDNSSuffix, nil
}

// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup)
func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool {
	isTSIngress := ing != nil &&
		ing.Spec.IngressClassName != nil &&
		*ing.Spec.IngressClassName == tailscaleIngressClassName
	pgAnnot := ing.Annotations[AnnotationProxyGroup]
	return isTSIngress && pgAnnot != ""
}

func (a *IngressPGReconciler) getVIPService(ctx context.Context, hostname string, logger *zap.SugaredLogger) (*VIPService, error) {
	svc, err := a.tsClient.getVIPServiceByName(ctx, hostname)
	if err != nil {
		errResp := &tailscale.ErrResponse{}
		if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound {
			logger.Infof("error getting VIPService %q: %v", hostname, err)
			return nil, fmt.Errorf("error getting VIPService %q: %w", hostname, err)
		}
	}
	return svc, nil
}

func isVIPServiceForIngress(svc *VIPService, ing *networkingv1.Ingress) bool {
	if svc == nil || ing == nil {
		return false
	}
	return strings.EqualFold(svc.Comment, fmt.Sprintf(VIPSvcOwnerRef, ing.UID))
}

func isVIPServiceForAnyIngress(svc *VIPService) bool {
	if svc == nil {
		return false
	}
	return strings.HasPrefix(svc.Comment, "tailscale.com/k8s-operator:owned-by:")
}

// validateIngress validates that the Ingress is properly configured.
// Currently validates:
// - Any tags provided via the tailscale.com/tags annotation are valid Tailscale ACL tags
// - The derived hostname is a valid DNS label
// - The referenced ProxyGroup exists and is of type 'ingress'
// - The Ingress's TLS block, if present, contains at most a single TLS entry with a single host
func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error {
	var errs []error

	// Validate tags if present
	if tstr, ok := ing.Annotations[AnnotationTags]; ok {
		tags := strings.Split(tstr, ",")
		for _, tag := range tags {
			tag = strings.TrimSpace(tag)
			if err := tailcfg.CheckTag(tag); err != nil {
				errs = append(errs, fmt.Errorf("tailscale.com/tags annotation contains invalid tag %q: %w", tag, err))
			}
		}
	}

	// Validate TLS configuration
	if len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) {
		errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS))
	}

	// Validate that the hostname will be a valid DNS label
	hostname := hostnameForIngress(ing)
	if err := dnsname.ValidLabel(hostname); err != nil {
		errs = append(errs, fmt.Errorf("invalid hostname %q: %w. Ensure that the hostname is a valid DNS label", hostname, err))
	}

	// Validate ProxyGroup type
	if pg.Spec.Type != tsapi.ProxyGroupTypeIngress {
		errs = append(errs, fmt.Errorf("ProxyGroup %q is of type %q but must be of type %q",
			pg.Name, pg.Spec.Type, tsapi.ProxyGroupTypeIngress))
	}

	// Validate ProxyGroup readiness
	if !tsoperator.ProxyGroupIsReady(pg) {
		errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name))
	}

	return errors.Join(errs...)
}

// deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress.
func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error {
	svc, err := a.getVIPService(ctx, name, logger)
	if err != nil {
		return fmt.Errorf("error getting VIPService: %w", err)
	}

	// isVIPServiceForIngress handles nil svc, so we don't need to check it here
	if !isVIPServiceForIngress(svc, ing) {
		return nil
	}

	logger.Infof("Deleting VIPService %q", name)
	if err = a.tsClient.deleteVIPServiceByName(ctx, name); err != nil {
		return fmt.Errorf("error deleting VIPService: %w", err)
	}
	return nil
}
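To make the serve config flow concrete, here is a minimal sketch of the ipn.ServeConfig that maybeProvision stores under serve-config.json, for a hypothetical Ingress with hostname "my-svc" on a tailnet whose MagicDNS suffix is tailnetxyz.ts.net. The structure follows the ingCfg literal above; the backend ClusterIP and port are made up.

cfg := &ipn.ServeConfig{
	Services: map[string]*ipn.ServiceConfig{
		// Keyed by the VIPService name (the Ingress hostname).
		"my-svc": {
			TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
			Web: map[ipn.HostPort]*ipn.WebServerConfig{
				"my-svc.tailnetxyz.ts.net:443": {
					Handlers: map[string]*ipn.HTTPHandler{
						// Produced by handlersForIngress from the Ingress backends;
						// the proxy target here is illustrative.
						"/": {Proxy: "http://10.0.153.180:8080/"},
					},
				},
			},
		},
	},
}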
cmd/k8s-operator/ingress-for-pg_test.go (new file, 337 lines)
@@ -0,0 +1,337 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"encoding/json"
	"slices"
	"testing"

	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/types/ptr"
)

func TestIngressPGReconciler(t *testing.T) {
	tsIngressClass := &networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
		Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
	}

	// Pre-create the ProxyGroup
	pg := &tsapi.ProxyGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-pg",
			Generation: 1,
		},
		Spec: tsapi.ProxyGroupSpec{
			Type: tsapi.ProxyGroupTypeIngress,
		},
	}

	// Pre-create the ConfigMap for the ProxyGroup
	pgConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-pg-ingress-config",
			Namespace: "operator-ns",
		},
		BinaryData: map[string][]byte{
			"serve-config.json": []byte(`{"Services":{}}`),
		},
	}

	fc := fake.NewClientBuilder().
		WithScheme(tsapi.GlobalScheme).
		WithObjects(pg, pgConfigMap, tsIngressClass).
		WithStatusSubresource(pg).
		Build()
	mustUpdateStatus(t, fc, "", pg.Name, func(pg *tsapi.ProxyGroup) {
		pg.Status.Conditions = []metav1.Condition{
			{
				Type:               string(tsapi.ProxyGroupReady),
				Status:             metav1.ConditionTrue,
				ObservedGeneration: 1,
			},
		}
	})
	ft := &fakeTSClient{}
	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
	zl, err := zap.NewDevelopment()
	if err != nil {
		t.Fatal(err)
	}

	lc := &fakeLocalClient{
		status: &ipnstate.Status{
			CurrentTailnet: &ipnstate.TailnetStatus{
				MagicDNSSuffix: "ts.net",
			},
		},
	}
	ingPGR := &IngressPGReconciler{
		Client:      fc,
		tsClient:    ft,
		tsnetServer: fakeTsnetServer,
		defaultTags: []string{"tag:k8s"},
		tsNamespace: "operator-ns",
		logger:      zl.Sugar(),
		recorder:    record.NewFakeRecorder(10),
		lc:          lc,
	}

	// Test 1: Default tags
	ing := &networkingv1.Ingress{
		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-ingress",
			Namespace: "default",
			UID:       types.UID("1234-UID"),
			Annotations: map[string]string{
				"tailscale.com/proxy-group": "test-pg",
			},
		},
		Spec: networkingv1.IngressSpec{
			IngressClassName: ptr.To("tailscale"),
			DefaultBackend: &networkingv1.IngressBackend{
				Service: &networkingv1.IngressServiceBackend{
					Name: "test",
					Port: networkingv1.ServiceBackendPort{
						Number: 8080,
					},
				},
			},
			TLS: []networkingv1.IngressTLS{
				{Hosts: []string{"my-svc.tailnetxyz.ts.net"}},
			},
		},
	}
	mustCreate(t, fc, ing)

	// Verify initial reconciliation
	expectReconciled(t, ingPGR, "default", "test-ingress")

	// Get and verify the ConfigMap was updated
	cm := &corev1.ConfigMap{}
	if err := fc.Get(context.Background(), types.NamespacedName{
		Name:      "test-pg-ingress-config",
		Namespace: "operator-ns",
	}, cm); err != nil {
		t.Fatalf("getting ConfigMap: %v", err)
	}

	cfg := &ipn.ServeConfig{}
	if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil {
		t.Fatalf("unmarshaling serve config: %v", err)
	}

	if cfg.Services["my-svc"] == nil {
		t.Error("expected serve config to contain VIPService configuration")
	}

	// Verify VIPService uses default tags
	vipSvc, err := ft.getVIPServiceByName(context.Background(), "my-svc")
	if err != nil {
		t.Fatalf("getting VIPService: %v", err)
	}
	if vipSvc == nil {
		t.Fatal("VIPService not created")
	}
	wantTags := []string{"tag:k8s"} // default tags
	if !slices.Equal(vipSvc.Tags, wantTags) {
		t.Errorf("incorrect VIPService tags: got %v, want %v", vipSvc.Tags, wantTags)
	}

	// Test 2: Custom tags
	mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
		ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test"
	})
	expectReconciled(t, ingPGR, "default", "test-ingress")

	// Verify VIPService uses custom tags
	vipSvc, err = ft.getVIPServiceByName(context.Background(), "my-svc")
	if err != nil {
		t.Fatalf("getting VIPService: %v", err)
	}
	if vipSvc == nil {
		t.Fatal("VIPService not created")
	}
	wantTags = []string{"tag:custom", "tag:test"} // custom tags only
	gotTags := slices.Clone(vipSvc.Tags)
	slices.Sort(gotTags)
	slices.Sort(wantTags)
	if !slices.Equal(gotTags, wantTags) {
		t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags)
	}

	// Delete the Ingress and verify cleanup
	if err := fc.Delete(context.Background(), ing); err != nil {
		t.Fatalf("deleting Ingress: %v", err)
	}

	expectReconciled(t, ingPGR, "default", "test-ingress")

	// Verify the ConfigMap was cleaned up
	cm = &corev1.ConfigMap{}
	if err := fc.Get(context.Background(), types.NamespacedName{
		Name:      "test-pg-ingress-config",
		Namespace: "operator-ns",
	}, cm); err != nil {
		t.Fatalf("getting ConfigMap: %v", err)
	}

	cfg = &ipn.ServeConfig{}
	if err := json.Unmarshal(cm.BinaryData[serveConfigKey], cfg); err != nil {
		t.Fatalf("unmarshaling serve config: %v", err)
	}

	if len(cfg.Services) > 0 {
		t.Error("serve config not cleaned up")
	}
}

func TestValidateIngress(t *testing.T) {
	baseIngress := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-ingress",
			Namespace: "default",
		},
	}

	readyProxyGroup := &tsapi.ProxyGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test-pg",
			Generation: 1,
		},
		Spec: tsapi.ProxyGroupSpec{
			Type: tsapi.ProxyGroupTypeIngress,
		},
		Status: tsapi.ProxyGroupStatus{
			Conditions: []metav1.Condition{
				{
					Type:               string(tsapi.ProxyGroupReady),
					Status:             metav1.ConditionTrue,
					ObservedGeneration: 1,
				},
			},
		},
	}

	tests := []struct {
		name    string
		ing     *networkingv1.Ingress
		pg      *tsapi.ProxyGroup
		wantErr string
	}{
		{
			name: "valid_ingress_with_hostname",
			ing: &networkingv1.Ingress{
				ObjectMeta: baseIngress.ObjectMeta,
				Spec: networkingv1.IngressSpec{
					TLS: []networkingv1.IngressTLS{
						{Hosts: []string{"test.example.com"}},
					},
				},
			},
			pg: readyProxyGroup,
		},
		{
			name: "valid_ingress_with_default_hostname",
			ing:  baseIngress,
			pg:   readyProxyGroup,
		},
		{
			name: "invalid_tags",
			ing: &networkingv1.Ingress{
				ObjectMeta: metav1.ObjectMeta{
					Name:      baseIngress.Name,
					Namespace: baseIngress.Namespace,
					Annotations: map[string]string{
						AnnotationTags: "tag:invalid!",
					},
				},
			},
			pg:      readyProxyGroup,
			wantErr: "tailscale.com/tags annotation contains invalid tag \"tag:invalid!\": tag names can only contain numbers, letters, or dashes",
		},
		{
			name: "multiple_TLS_entries",
			ing: &networkingv1.Ingress{
				ObjectMeta: baseIngress.ObjectMeta,
				Spec: networkingv1.IngressSpec{
					TLS: []networkingv1.IngressTLS{
						{Hosts: []string{"test1.example.com"}},
						{Hosts: []string{"test2.example.com"}},
					},
				},
			},
			pg:      readyProxyGroup,
			wantErr: "Ingress contains invalid TLS block [{[test1.example.com] } {[test2.example.com] }]: only a single TLS entry with a single host is allowed",
		},
		{
			name: "multiple_hosts_in_TLS_entry",
			ing: &networkingv1.Ingress{
				ObjectMeta: baseIngress.ObjectMeta,
				Spec: networkingv1.IngressSpec{
					TLS: []networkingv1.IngressTLS{
						{Hosts: []string{"test1.example.com", "test2.example.com"}},
					},
				},
			},
			pg:      readyProxyGroup,
			wantErr: "Ingress contains invalid TLS block [{[test1.example.com test2.example.com] }]: only a single TLS entry with a single host is allowed",
		},
		{
			name: "wrong_proxy_group_type",
			ing:  baseIngress,
			pg: &tsapi.ProxyGroup{
				ObjectMeta: readyProxyGroup.ObjectMeta,
				Spec: tsapi.ProxyGroupSpec{
					Type: tsapi.ProxyGroupType("foo"),
				},
				Status: readyProxyGroup.Status,
			},
			wantErr: "ProxyGroup \"test-pg\" is of type \"foo\" but must be of type \"ingress\"",
		},
		{
			name: "proxy_group_not_ready",
			ing:  baseIngress,
			pg: &tsapi.ProxyGroup{
				ObjectMeta: readyProxyGroup.ObjectMeta,
				Spec:       readyProxyGroup.Spec,
				Status: tsapi.ProxyGroupStatus{
					Conditions: []metav1.Condition{
						{
							Type:               string(tsapi.ProxyGroupReady),
							Status:             metav1.ConditionFalse,
							ObservedGeneration: 1,
						},
					},
				},
			},
			wantErr: "ProxyGroup \"test-pg\" is not ready",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &IngressPGReconciler{}
			err := r.validateIngress(tt.ing, tt.pg)
			if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) {
				t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
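One detail worth calling out from the code above: ownership of a VIPService is tracked via its Comment field, formatted with VIPSvcOwnerRef. A sketch, with the UID and tag values illustrative (taken from the test fixtures):

svc := &VIPService{
	Name:    "my-svc",
	Tags:    []string{"tag:k8s"},
	Ports:   []string{"443"}, // always 443 for Ingress
	Comment: fmt.Sprintf(VIPSvcOwnerRef, "1234-UID"), // "tailscale.com/k8s-operator:owned-by:1234-UID"
}
// isVIPServiceForIngress(svc, ing) compares this Comment against the Ingress UID;
// isVIPServiceForAnyIngress(svc) only checks for the "owned-by" prefix.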
@@ -26,6 +26,7 @@ import (
 	"tailscale.com/kube/kubetypes"
 	"tailscale.com/types/opt"
 	"tailscale.com/util/clientmetric"
+	"tailscale.com/util/mak"
 	"tailscale.com/util/set"
 )

@@ -58,7 +59,7 @@ var (
 )

 func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
-	logger := a.logger.With("ingress-ns", req.Namespace, "ingress-name", req.Name)
+	logger := a.logger.With("Ingress", req.NamespacedName)
 	logger.Debugf("starting reconcile")
 	defer logger.Debugf("reconcile finished")

@@ -128,9 +129,8 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
 // This function adds a finalizer to ing, ensuring that we can handle orderly
 // deprovisioning later.
 func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error {
-	if err := a.validateIngressClass(ctx); err != nil {
+	if err := validateIngressClass(ctx, a.Client); err != nil {
 		logger.Warnf("error validating tailscale IngressClass: %v. In future this might be a terminal error.", err)
-
 	}
 	if !slices.Contains(ing.Finalizers, FinalizerName) {
 		// This log line is printed exactly once during initial provisioning,
@@ -159,7 +159,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 	gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
 	a.mu.Unlock()

-	if !a.ssr.IsHTTPSEnabledOnTailnet() {
+	if !IsHTTPSEnabledOnTailnet(a.ssr.tsnetServer) {
 		a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work")
 	}

@@ -185,73 +185,16 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 	}

 	web := sc.Web[magic443]
-	addIngressBackend := func(b *networkingv1.IngressBackend, path string) {
-		if b == nil {
-			return
-		}
-		if b.Service == nil {
-			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path)
-			return
-		}
-		var svc corev1.Service
-		if err := a.Get(ctx, types.NamespacedName{Namespace: ing.Namespace, Name: b.Service.Name}, &svc); err != nil {
-			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "failed to get service %q for path %q: %v", b.Service.Name, path, err)
-			return
-		}
-		if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
-			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid ClusterIP", path)
-			return
-		}
-		var port int32
-		if b.Service.Port.Name != "" {
-			for _, p := range svc.Spec.Ports {
-				if p.Name == b.Service.Port.Name {
-					port = p.Port
-					break
-				}
-			}
-		} else {
-			port = b.Service.Port.Number
-		}
-		if port == 0 {
-			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid port", path)
-			return
-		}
-		proto := "http://"
-		if port == 443 || b.Service.Port.Name == "https" {
-			proto = "https+insecure://"
-		}
-		web.Handlers[path] = &ipn.HTTPHandler{
-			Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path,
-		}
-	}
-	addIngressBackend(ing.Spec.DefaultBackend, "/")
-
 	var tlsHost string // hostname or FQDN or empty
 	if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 {
 		tlsHost = ing.Spec.TLS[0].Hosts[0]
 	}
-	for _, rule := range ing.Spec.Rules {
-		// Host is optional, but if it's present it must match the TLS host
-		// otherwise we ignore the rule.
-		if rule.Host != "" && rule.Host != tlsHost {
-			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "rule with host %q ignored, unsupported", rule.Host)
-			continue
-		}
-		for _, p := range rule.HTTP.Paths {
-			// Send a warning if folks use Exact path type - to make
-			// it easier for us to support Exact path type matching
-			// in the future if needed.
-			// https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
-			if *p.PathType == networkingv1.PathTypeExact {
-				msg := "Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future."
-				logger.Warnf(fmt.Sprintf("Unsupported Path type exact for path %s. %s", p.Path, msg))
-				a.recorder.Eventf(ing, corev1.EventTypeWarning, "UnsupportedPathTypeExact", msg)
-			}
-			addIngressBackend(&p.Backend, p.Path)
-		}
+	handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, tlsHost, logger)
+	if err != nil {
+		return fmt.Errorf("failed to get handlers for ingress: %w", err)
 	}

+	web.Handlers = handlers
 	if len(web.Handlers) == 0 {
 		logger.Warn("Ingress contains no valid backends")
 		a.recorder.Eventf(ing, corev1.EventTypeWarning, "NoValidBackends", "no valid backends")
@@ -263,10 +206,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 	if tstr, ok := ing.Annotations[AnnotationTags]; ok {
 		tags = strings.Split(tstr, ",")
 	}
-	hostname := ing.Namespace + "-" + ing.Name + "-ingress"
-	if tlsHost != "" {
-		hostname, _, _ = strings.Cut(tlsHost, ".")
-	}
+	hostname := hostnameForIngress(ing)

 	sts := &tailscaleSTSConfig{
 		Hostname: hostname,
@@ -322,28 +262,106 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool {
 	return ing != nil &&
 		ing.Spec.IngressClassName != nil &&
-		*ing.Spec.IngressClassName == tailscaleIngressClassName
+		*ing.Spec.IngressClassName == tailscaleIngressClassName &&
+		ing.Annotations[AnnotationProxyGroup] == ""
 }

 // validateIngressClass attempts to validate that 'tailscale' IngressClass
 // included in Tailscale installation manifests exists and has not been modified
 // to attempt to enable features that we do not support.
-func (a *IngressReconciler) validateIngressClass(ctx context.Context) error {
+func validateIngressClass(ctx context.Context, cl client.Client) error {
 	ic := &networkingv1.IngressClass{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: tailscaleIngressClassName,
 		},
 	}
-	if err := a.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) {
-		return errors.New("Tailscale IngressClass not found in cluster. Latest installation manifests include a tailscale IngressClass - please update")
+	if err := cl.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) {
+		return errors.New("'tailscale' IngressClass not found in cluster.")
 	} else if err != nil {
 		return fmt.Errorf("error retrieving 'tailscale' IngressClass: %w", err)
 	}
 	if ic.Spec.Controller != tailscaleIngressControllerName {
-		return fmt.Errorf("Tailscale Ingress class controller name %s does not match tailscale Ingress controller name %s. Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ic.Spec.Controller, tailscaleIngressControllerName)
+		return fmt.Errorf("'tailscale' Ingress class controller name %s does not match tailscale Ingress controller name %s. Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ic.Spec.Controller, tailscaleIngressControllerName)
 	}
 	if ic.GetAnnotations()[ingressClassDefaultAnnotation] != "" {
 		return fmt.Errorf("%s annotation is set on 'tailscale' IngressClass, but Tailscale Ingress controller does not support default Ingress class. Ensure that you are using 'tailscale' IngressClass from latest Tailscale installation manifests", ingressClassDefaultAnnotation)
 	}
 	return nil
 }

+func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl client.Client, rec record.EventRecorder, tlsHost string, logger *zap.SugaredLogger) (handlers map[string]*ipn.HTTPHandler, err error) {
+	addIngressBackend := func(b *networkingv1.IngressBackend, path string) {
+		if b == nil {
+			return
+		}
+		if b.Service == nil {
+			rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path)
+			return
+		}
+		var svc corev1.Service
+		if err := cl.Get(ctx, types.NamespacedName{Namespace: ing.Namespace, Name: b.Service.Name}, &svc); err != nil {
+			rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "failed to get service %q for path %q: %v", b.Service.Name, path, err)
+			return
+		}
+		if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
+			rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid ClusterIP", path)
+			return
+		}
+		var port int32
+		if b.Service.Port.Name != "" {
+			for _, p := range svc.Spec.Ports {
+				if p.Name == b.Service.Port.Name {
+					port = p.Port
+					break
+				}
+			}
+		} else {
+			port = b.Service.Port.Number
+		}
+		if port == 0 {
+			rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid port", path)
+			return
+		}
+		proto := "http://"
+		if port == 443 || b.Service.Port.Name == "https" {
+			proto = "https+insecure://"
+		}
+		mak.Set(&handlers, path, &ipn.HTTPHandler{
+			Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path,
+		})
+	}
+	addIngressBackend(ing.Spec.DefaultBackend, "/")
+	for _, rule := range ing.Spec.Rules {
+		// Host is optional, but if it's present it must match the TLS host;
+		// otherwise we ignore the rule.
+		if rule.Host != "" && rule.Host != tlsHost {
+			rec.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "rule with host %q ignored, unsupported", rule.Host)
+			continue
+		}
+		for _, p := range rule.HTTP.Paths {
+			// Send a warning if folks use Exact path type - to make
+			// it easier for us to support Exact path type matching
+			// in the future if needed.
+			// https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
+			if *p.PathType == networkingv1.PathTypeExact {
+				msg := "Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future."
+				logger.Warnf("Unsupported Path type exact for path %s. %s", p.Path, msg)
+				rec.Eventf(ing, corev1.EventTypeWarning, "UnsupportedPathTypeExact", msg)
+			}
+			addIngressBackend(&p.Backend, p.Path)
+		}
+	}
+	return handlers, nil
+}
+
+// hostnameForIngress returns the hostname for an Ingress resource.
+// If the Ingress has TLS configured with a host, it returns the first component of that host.
+// Otherwise, it returns a hostname derived from the Ingress name and namespace.
+func hostnameForIngress(ing *networkingv1.Ingress) string {
+	if len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 {
+		h := ing.Spec.TLS[0].Hosts[0]
+		hostname, _, _ := strings.Cut(h, ".")
+		return hostname
+	}
+	return ing.Namespace + "-" + ing.Name + "-ingress"
+}
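A quick illustration of hostnameForIngress, using the function above; the names are illustrative:

func demoHostnameForIngress() {
	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "test-ingress", Namespace: "default"},
	}
	fmt.Println(hostnameForIngress(ing)) // prints "default-test-ingress-ingress"

	ing.Spec.TLS = []networkingv1.IngressTLS{{Hosts: []string{"my-svc.tailnetxyz.ts.net"}}}
	fmt.Println(hostnameForIngress(ing)) // prints "my-svc" (first label of the TLS host)
}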
@@ -18,7 +18,6 @@ import (
 	"github.com/go-logr/zapr"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
-	"golang.org/x/oauth2/clientcredentials"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	discoveryv1 "k8s.io/api/discovery/v1"
@@ -107,14 +106,14 @@ func main() {
 		hostinfo.SetApp(kubetypes.AppAPIServerProxy)
 	}

-	s, tsClient := initTSNet(zlog)
+	s, tsc := initTSNet(zlog)
 	defer s.Close()
 	restConfig := config.GetConfigOrDie()
 	maybeLaunchAPIServerProxy(zlog, restConfig, s, mode)
 	rOpts := reconcilerOpts{
 		log:                zlog,
 		tsServer:           s,
-		tsClient:           tsClient,
+		tsClient:           tsc,
 		tailscaleNamespace: tsNamespace,
 		restConfig:         restConfig,
 		proxyImage:         image,
@@ -130,7 +129,7 @@ func main() {
 // initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the
 // CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate
 // with Tailscale.
-func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) {
+func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) {
 	var (
 		clientIDPath     = defaultEnv("CLIENT_ID_FILE", "")
 		clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "")
@@ -142,23 +141,10 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) {
 	if clientIDPath == "" || clientSecretPath == "" {
 		startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set")
 	}
-	clientID, err := os.ReadFile(clientIDPath)
+	tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath)
 	if err != nil {
-		startlog.Fatalf("reading client ID %q: %v", clientIDPath, err)
+		startlog.Fatalf("error creating Tailscale client: %v", err)
 	}
-	clientSecret, err := os.ReadFile(clientSecretPath)
-	if err != nil {
-		startlog.Fatalf("reading client secret %q: %v", clientSecretPath, err)
-	}
-	credentials := clientcredentials.Config{
-		ClientID:     string(clientID),
-		ClientSecret: string(clientSecret),
-		TokenURL:     "https://login.tailscale.com/api/v2/oauth/token",
-	}
-	tsClient := tailscale.NewClient("-", nil)
-	tsClient.UserAgent = "tailscale-k8s-operator"
-	tsClient.HTTPClient = credentials.Client(context.Background())
-
 	s := &tsnet.Server{
 		Hostname: hostname,
 		Logf:     zlog.Named("tailscaled").Debugf,
@@ -211,7 +197,7 @@ waitOnline:
 			},
 		},
 	}
-	authkey, _, err := tsClient.CreateKey(ctx, caps)
+	authkey, _, err := tsc.CreateKey(ctx, caps)
 	if err != nil {
 		startlog.Fatalf("creating operator authkey: %v", err)
 	}
@@ -235,7 +221,7 @@ waitOnline:
 		}
 		time.Sleep(time.Second)
 	}
-	return s, tsClient
+	return s, tsc
 }

 // runReconcilers starts the controller-runtime manager and registers the
@@ -343,6 +329,27 @@ func runReconcilers(opts reconcilerOpts) {
 	if err != nil {
 		startlog.Fatalf("could not create ingress reconciler: %v", err)
 	}
+	lc, err := opts.tsServer.LocalClient()
+	if err != nil {
+		startlog.Fatalf("could not get local client: %v", err)
+	}
+	err = builder.
+		ControllerManagedBy(mgr).
+		For(&networkingv1.Ingress{}).
+		Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))).
+		Complete(&IngressPGReconciler{
+			recorder:    eventRecorder,
+			tsClient:    opts.tsClient,
+			tsnetServer: opts.tsServer,
+			defaultTags: strings.Split(opts.proxyTags, ","),
+			Client:      mgr.GetClient(),
+			logger:      opts.log.Named("ingress-pg-reconciler"),
+			lc:          lc,
+			tsNamespace: opts.tailscaleNamespace,
+		})
+	if err != nil {
+		startlog.Fatalf("could not create ingress-pg-reconciler: %v", err)
+	}

 	connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
 	// If a ProxyClass changes, enqueue all Connectors that have
@@ -514,6 +521,7 @@ func runReconcilers(opts reconcilerOpts) {
 	err = builder.ControllerManagedBy(mgr).
 		For(&tsapi.ProxyGroup{}).
 		Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter).
+		Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter).
 		Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter).
 		Watches(&corev1.Secret{}, ownedByProxyGroupFilter).
 		Watches(&rbacv1.Role{}, ownedByProxyGroupFilter).
@@ -545,7 +553,7 @@ func runReconcilers(opts reconcilerOpts) {
 type reconcilerOpts struct {
 	log                *zap.SugaredLogger
 	tsServer           *tsnet.Server
-	tsClient           *tailscale.Client
+	tsClient           tsClient
 	tailscaleNamespace string       // namespace in which operator resources will be deployed
 	restConfig         *rest.Config // config for connecting to the kube API server
 	proxyImage         string       // <proxy-image-repo>:<proxy-image-tag>
@@ -670,12 +678,6 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c
 	}
 }

-type tsClient interface {
-	CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
-	Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error)
-	DeleteDevice(ctx context.Context, nodeStableID string) error
-}
-
 func isManagedResource(o client.Object) bool {
 	ls := o.GetLabels()
 	return ls[LabelManaged] == "true"
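The tsClient interface removed above is relocated out of this file (the destination file is not part of this excerpt). Based on the call sites in ingress-for-pg.go, the relocated interface presumably gains VIPService methods along these lines. This is a sketch with inferred signatures, not the commit's actual definition:

// Assumption: the relocated tsClient interface, extended with the VIPService
// methods the new reconciler calls (getVIPServiceByName, createOrUpdateVIPServiceByName,
// deleteVIPServiceByName). Signatures are inferred from call sites.
type tsClient interface {
	CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
	Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error)
	DeleteDevice(ctx context.Context, nodeStableID string) error
	getVIPServiceByName(ctx context.Context, name string) (*VIPService, error)
	createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error
	deleteVIPServiceByName(ctx context.Context, name string) error
}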
@@ -811,6 +813,10 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl
 		if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName {
 			return nil
 		}
+		if hasProxyGroupAnnotation(&ing) {
+			// We don't want to reconcile backend Services for Ingresses for ProxyGroups.
+			continue
+		}
 		if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() {
 			reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)})
 		}
@ -1094,3 +1100,44 @@ func indexEgressServices(o client.Object) []string {
|
||||
}
|
||||
return []string{o.GetAnnotations()[AnnotationProxyGroup]}
|
||||
}
|
||||
|
||||
// serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service
|
||||
// associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation,
|
||||
// the associated Ingress gets reconciled.
|
||||
func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
|
||||
return func(ctx context.Context, o client.Object) []reconcile.Request {
|
||||
ingList := networkingv1.IngressList{}
|
||||
if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil {
|
||||
logger.Debugf("error listing Ingresses: %v", err)
|
||||
return nil
|
||||
}
|
||||
reqs := make([]reconcile.Request, 0)
|
||||
for _, ing := range ingList.Items {
|
||||
if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName {
|
||||
continue
|
||||
}
|
||||
if !hasProxyGroupAnnotation(&ing) {
|
||||
continue
|
||||
}
|
||||
if ing.Spec.DefaultBackend != nil && ing.Spec.DefaultBackend.Service != nil && ing.Spec.DefaultBackend.Service.Name == o.GetName() {
|
||||
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)})
|
||||
}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.HTTP == nil {
|
||||
continue
|
||||
}
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
if path.Backend.Service != nil && path.Backend.Service.Name == o.GetName() {
|
||||
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return reqs
|
||||
}
|
||||
}
|
||||
|
||||
func hasProxyGroupAnnotation(obj client.Object) bool {
|
||||
ing := obj.(*networkingv1.Ingress)
|
||||
return ing.Annotations[AnnotationProxyGroup] != ""
|
||||
}
|
||||
|
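A minimal wiring sketch for context, not part of this commit (the function name, manager, and logger arguments are illustrative): this is how a handler.MapFunc like serviceHandlerForIngressPG is typically registered with controller-runtime so that backend Service events re-enqueue the owning Ingress.

package main

import (
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// wireIngressPGController reconciles tailscale Ingresses and re-enqueues an
// Ingress whenever one of its backend Services changes.
func wireIngressPGController(mgr manager.Manager, logger *zap.SugaredLogger, r *IngressPGReconciler) error {
	return builder.ControllerManagedBy(mgr).
		For(&networkingv1.Ingress{}).
		Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), logger))).
		Complete(r)
}
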
@ -258,7 +258,16 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
existing.ObjectMeta.Labels = cm.ObjectMeta.Labels
existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences
}); err != nil {
return fmt.Errorf("error provisioning ConfigMap: %w", err)
return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err)
}
}
if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
cm := pgIngressCM(pg, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
existing.ObjectMeta.Labels = cm.ObjectMeta.Labels
existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences
}); err != nil {
return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err)
}
}
ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode)

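The ingress ConfigMap is provisioned empty here; the Ingress reconciler then writes the shared serve config into it for all replicas to mount. A minimal sketch of that write, assuming the marshalled *ipn.ServeConfig is stored under the serve-config.json key (setServeConfig is a hypothetical helper; the commit's actual write lives in ingress-for-pg.go):

func setServeConfig(cm *corev1.ConfigMap, sc *ipn.ServeConfig) error {
	b, err := json.Marshal(sc)
	if err != nil {
		return fmt.Errorf("error marshalling serve config: %w", err)
	}
	// serveConfigKey is "serve-config.json"; mak.Set allocates the map if nil.
	mak.Set(&cm.BinaryData, serveConfigKey, b)
	return nil
}
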
@ -56,6 +56,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
}
tmpl.Spec.ServiceAccountName = pg.Name
tmpl.Spec.InitContainers[0].Image = image
proxyConfigVolName := pgEgressCMName(pg.Name)
if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
proxyConfigVolName = pgIngressCMName(pg.Name)
}
tmpl.Spec.Volumes = func() []corev1.Volume {
var volumes []corev1.Volume
for i := range pgReplicas(pg) {
@ -69,18 +73,16 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
})
}

if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
volumes = append(volumes, corev1.Volume{
Name: pgEgressCMName(pg.Name),
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: pgEgressCMName(pg.Name),
},
volumes = append(volumes, corev1.Volume{
Name: proxyConfigVolName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: proxyConfigVolName,
},
},
})
}
},
})

return volumes
}()
@ -102,13 +104,11 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
})
}

if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
mounts = append(mounts, corev1.VolumeMount{
Name: pgEgressCMName(pg.Name),
MountPath: "/etc/proxies",
ReadOnly: true,
})
}
mounts = append(mounts, corev1.VolumeMount{
Name: proxyConfigVolName,
MountPath: "/etc/proxies",
ReadOnly: true,
})

return mounts
}()
@ -154,11 +154,15 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
Value: kubetypes.AppProxyGroupEgress,
},
)
} else {
} else { // ingress
envs = append(envs, corev1.EnvVar{
Name: "TS_INTERNAL_APP",
Value: kubetypes.AppProxyGroupIngress,
})
},
corev1.EnvVar{
Name: "TS_SERVE_CONFIG",
Value: fmt.Sprintf("/etc/proxies/%s", serveConfigKey),
})
}
return append(c.Env, envs...)
}()
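On the proxy side, containerboot resolves TS_SERVE_CONFIG to the mounted file (/etc/proxies/serve-config.json) and tolerates it being absent, since the ConfigMap starts out empty. A minimal reader sketch under those assumptions (loadServeConfig is a hypothetical name; the real watcher is containerboot's watchServeConfigChanges):

func loadServeConfig() (*ipn.ServeConfig, error) {
	path := os.Getenv("TS_SERVE_CONFIG")
	if path == "" {
		return nil, nil // no serve config requested
	}
	b, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // the operator has not populated the ConfigMap yet
		}
		return nil, err
	}
	var sc ipn.ServeConfig
	if err := json.Unmarshal(b, &sc); err != nil {
		return nil, fmt.Errorf("invalid serve config at %q: %w", path, err)
	}
	return &sc, nil
}
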
@ -264,6 +268,16 @@ func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap {
},
}
}
func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: pgIngressCMName(pg.Name),
Namespace: namespace,
Labels: pgLabels(pg.Name, nil),
OwnerReferences: pgOwnerReference(pg),
},
}
}

func pgSecretLabels(pgName, typ string) map[string]string {
return pgLabels(pgName, map[string]string{

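pgIngressCMName is referenced above but defined outside these hunks; judging by the test below, which expects a <proxy-group-name>-ingress-config ConfigMap, its shape is presumably:

func pgIngressCMName(pgName string) string {
	return fmt.Sprintf("%s-ingress-config", pgName)
}
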
@ -332,7 +332,8 @@ func TestProxyGroupTypes(t *testing.T) {
UID: "test-ingress-uid",
},
Spec: tsapi.ProxyGroupSpec{
Type: tsapi.ProxyGroupTypeIngress,
Type: tsapi.ProxyGroupTypeIngress,
Replicas: ptr.To[int32](0),
},
}
if err := fc.Create(context.Background(), pg); err != nil {
@ -347,6 +348,34 @@ func TestProxyGroupTypes(t *testing.T) {
t.Fatalf("failed to get StatefulSet: %v", err)
}
verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress)
verifyEnvVar(t, sts, "TS_SERVE_CONFIG", "/etc/proxies/serve-config.json")

// Verify ConfigMap volume mount
cmName := fmt.Sprintf("%s-ingress-config", pg.Name)
expectedVolume := corev1.Volume{
Name: cmName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cmName,
},
},
},
}

expectedVolumeMount := corev1.VolumeMount{
Name: cmName,
MountPath: "/etc/proxies",
ReadOnly: true,
}

if diff := cmp.Diff([]corev1.Volume{expectedVolume}, sts.Spec.Template.Spec.Volumes); diff != "" {
t.Errorf("unexpected volumes (-want +got):\n%s", diff)
}

if diff := cmp.Diff([]corev1.VolumeMount{expectedVolumeMount}, sts.Spec.Template.Spec.Containers[0].VolumeMounts); diff != "" {
t.Errorf("unexpected volume mounts (-want +got):\n%s", diff)
}
})
}

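verifyEnvVar is used above but not shown in these hunks; a plausible shape, included here only for reference (an assumption, not the committed helper):

func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, want string) {
	t.Helper()
	for _, env := range sts.Spec.Template.Spec.Containers[0].Env {
		if env.Name == name {
			if env.Value != want {
				t.Errorf("env %s = %q, want %q", name, env.Value, want)
			}
			return
		}
	}
	t.Errorf("env %s not set", name)
}
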
@ -172,8 +172,8 @@ func (sts tailscaleSTSReconciler) validate() error {
}

// IsHTTPSEnabledOnTailnet reports whether HTTPS is enabled on the tailnet.
func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool {
return len(a.tsnetServer.CertDomains()) > 0
func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool {
return len(tsnetServer.CertDomains()) > 0
}

// Provision ensures that the StatefulSet for the given service is running and

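At call sites, the change above reads as follows (illustrative sketch):

// before: only the StatefulSet reconciler could ask.
enabled := a.IsHTTPSEnabledOnTailnet()
// after: any caller holding a tsnetServer can ask, including the new Ingress reconciler.
enabled = IsHTTPSEnabledOnTailnet(a.tsnetServer)
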
@ -9,6 +9,7 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/netip"
"reflect"
"strings"
@ -29,6 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr"
"tailscale.com/util/mak"
@ -737,6 +739,7 @@ type fakeTSClient struct {
sync.Mutex
keyRequests []tailscale.KeyCapabilities
deleted []string
vipServices map[string]*VIPService
}
type fakeTSNetServer struct {
certDomains []string
@ -842,3 +845,50 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) {
}
}
}

func (c *fakeTSClient) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) {
c.Lock()
defer c.Unlock()
if c.vipServices == nil {
return nil, &tailscale.ErrResponse{Status: http.StatusNotFound}
}
svc, ok := c.vipServices[name]
if !ok {
return nil, &tailscale.ErrResponse{Status: http.StatusNotFound}
}
return svc, nil
}

func (c *fakeTSClient) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error {
c.Lock()
defer c.Unlock()
if c.vipServices == nil {
c.vipServices = make(map[string]*VIPService)
}
c.vipServices[svc.Name] = svc
return nil
}

func (c *fakeTSClient) deleteVIPServiceByName(ctx context.Context, name string) error {
c.Lock()
defer c.Unlock()
if c.vipServices != nil {
delete(c.vipServices, name)
}
return nil
}

type fakeLocalClient struct {
status *ipnstate.Status
}

func (f *fakeLocalClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) {
if f.status == nil {
return &ipnstate.Status{
Self: &ipnstate.PeerStatus{
DNSName: "test-node.test.ts.net.",
},
}, nil
}
return f.status, nil
}

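A hypothetical round trip over the fake's VIPService methods, showing the behavior tests can rely on (the test name and field values are examples):

func TestFakeVIPServiceRoundTrip(t *testing.T) {
	ctx := context.Background()
	ft := &fakeTSClient{}
	want := &VIPService{Name: "my-app", Ports: []string{"443"}}
	if err := ft.createOrUpdateVIPServiceByName(ctx, want); err != nil {
		t.Fatal(err)
	}
	got, err := ft.getVIPServiceByName(ctx, "my-app")
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %+v, want %+v", got, want)
	}
	if err := ft.deleteVIPServiceByName(ctx, "my-app"); err != nil {
		t.Fatal(err)
	}
	if _, err := ft.getVIPServiceByName(ctx, "my-app"); err == nil {
		t.Fatal("expected a 404-style error after deletion")
	}
}
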
185
cmd/k8s-operator/tsclient.go
Normal file
@ -0,0 +1,185 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"

"golang.org/x/oauth2/clientcredentials"
"tailscale.com/client/tailscale"
"tailscale.com/util/httpm"
)

// defaultTailnet is a value that can be used in Tailscale API calls instead of tailnet name to indicate that the API
// call should be performed on the default tailnet for the provided credentials.
const (
defaultTailnet = "-"
defaultBaseURL = "https://api.tailscale.com"
)

func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (tsClient, error) {
clientID, err := os.ReadFile(clientIDPath)
if err != nil {
return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err)
}
clientSecret, err := os.ReadFile(clientSecretPath)
if err != nil {
return nil, fmt.Errorf("error reading client secret %q: %w", clientSecretPath, err)
}
credentials := clientcredentials.Config{
ClientID: string(clientID),
ClientSecret: string(clientSecret),
TokenURL: "https://login.tailscale.com/api/v2/oauth/token",
}
c := tailscale.NewClient(defaultTailnet, nil)
c.UserAgent = "tailscale-k8s-operator"
c.HTTPClient = credentials.Client(ctx)
tsc := &tsClientImpl{
Client: c,
baseURL: defaultBaseURL,
tailnet: defaultTailnet,
}
return tsc, nil
}

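A hypothetical call site, assuming the operator is handed the OAuth client credentials as mounted file paths (the paths here are examples):

tsc, err := newTSClient(ctx, "/oauth/client_id", "/oauth/client_secret")
if err != nil {
	return fmt.Errorf("error creating Tailscale API client: %w", err)
}
// tsc satisfies the tsClient interface below, including the VIPService methods.
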
type tsClient interface {
CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error)
DeleteDevice(ctx context.Context, nodeStableID string) error
getVIPServiceByName(ctx context.Context, name string) (*VIPService, error)
createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error
deleteVIPServiceByName(ctx context.Context, name string) error
}

type tsClientImpl struct {
*tailscale.Client
baseURL string
tailnet string
}

// VIPService is a Tailscale VIPService with Tailscale API JSON representation.
type VIPService struct {
// Name is the leftmost label of the DNS name of the VIP service.
// Name is required.
Name string `json:"name,omitempty"`
// Addrs are the IP addresses of the VIP Service. There are two addresses:
// the first is IPv4 and the second is IPv6.
// When creating a new VIP Service, the IP addresses are optional: if no
// addresses are specified then they will be selected. If an IPv4 address is
// specified at index 0, then that address will attempt to be used. An IPv6
// address cannot be specified upon creation.
Addrs []string `json:"addrs,omitempty"`
// Comment is an optional text string for display in the admin panel.
Comment string `json:"comment,omitempty"`
// Ports are the ports of a VIPService that will be configured via Tailscale serve config.
// If set, any node wishing to advertise this VIPService must have this port configured via Tailscale serve.
Ports []string `json:"ports,omitempty"`
// Tags are optional ACL tags that will be applied to the VIPService.
Tags []string `json:"tags,omitempty"`
}

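An illustrative VIPService for an L7 Ingress (all field values here are examples, not taken from this commit):

svc := &VIPService{
	Name:    "my-app", // leftmost label of the service's DNS name
	Comment: "created by the Tailscale Kubernetes Operator",
	Ports:   []string{"443"}, // must match what the proxies serve
	Tags:    []string{"tag:k8s"},
	// Addrs left empty: Tailscale auto-allocates an IPv4 and an IPv6 address.
}
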
// getVIPServiceByName retrieves a VIPService by its name. It returns a 404 error if the VIPService is not found.
func (c *tsClientImpl) getVIPServiceByName(ctx context.Context, name string) (*VIPService, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name))
req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil)
if err != nil {
return nil, fmt.Errorf("error creating new HTTP request: %w", err)
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, fmt.Errorf("error making Tailscale API request: %w", err)
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
svc := &VIPService{}
if err := json.Unmarshal(b, svc); err != nil {
return nil, err
}
return svc, nil
}

// createOrUpdateVIPServiceByName creates or updates a VIPService by its name. Caller must ensure that, if the
// VIPService already exists, it is fetched first so that any auto-allocated IP addresses are not
// lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were
// auto-allocated by Tailscale), any subsequent request to this function that does not set any IP addresses will error.
func (c *tsClientImpl) createOrUpdateVIPServiceByName(ctx context.Context, svc *VIPService) error {
data, err := json.Marshal(svc)
if err != nil {
return err
}
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(svc.Name))
req, err := http.NewRequestWithContext(ctx, httpm.PUT, path, bytes.NewBuffer(data))
if err != nil {
return fmt.Errorf("error creating new HTTP request: %w", err)
}
b, resp, err := c.sendRequest(req)
if err != nil {
return fmt.Errorf("error making Tailscale API request: %w", err)
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

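Per the doc comment above, callers should fetch an existing VIPService before updating it so that auto-allocated addresses survive. A minimal read-modify-write sketch (ensureVIPService and isNotFound are hypothetical; isNotFound would compare a tailscale.ErrResponse's Status against http.StatusNotFound):

func ensureVIPService(ctx context.Context, c tsClient, desired *VIPService) error {
	existing, err := c.getVIPServiceByName(ctx, desired.Name)
	switch {
	case err != nil && !isNotFound(err):
		return err
	case err == nil && len(desired.Addrs) == 0:
		// Preserve the addresses Tailscale auto-allocated at creation time.
		desired.Addrs = existing.Addrs
	}
	return c.createOrUpdateVIPServiceByName(ctx, desired)
}
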
// deleteVIPServiceByName deletes a VIPService by its name. It returns an error if the VIPService
// does not exist or if the deletion fails.
func (c *tsClientImpl) deleteVIPServiceByName(ctx context.Context, name string) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/vip-services/by-name/%s", c.baseURL, c.tailnet, url.PathEscape(name))
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
if err != nil {
return fmt.Errorf("error creating new HTTP request: %w", err)
}
b, resp, err := c.sendRequest(req)
if err != nil {
return fmt.Errorf("error making Tailscale API request: %w", err)
}
// If status code was not successful, return the error.
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

// sendRequest adds the authentication key to the request and sends it. It
// receives the response and reads its body.
func (c *tsClientImpl) sendRequest(req *http.Request) ([]byte, *http.Response, error) {
resp, err := c.Do(req)
if err != nil {
return nil, resp, fmt.Errorf("error actually doing request: %w", err)
}
defer resp.Body.Close()

// Read response
b, err := io.ReadAll(resp.Body)
if err != nil {
err = fmt.Errorf("error reading response body: %v", err)
}
return b, resp, err
}

// handleErrorResponse decodes the error message from the server and returns
// an ErrResponse from it.
func handleErrorResponse(b []byte, resp *http.Response) error {
var errResp tailscale.ErrResponse
if err := json.Unmarshal(b, &errResp); err != nil {
return err
}
errResp.Status = resp.StatusCode
return errResp
}

@ -15,8 +15,9 @@ const (
AppProxyGroupIngress = "k8s-operator-proxygroup-ingress"

// Clientmetrics for Tailscale Kubernetes Operator components
MetricIngressProxyCount = "k8s_ingress_proxies" // L3
MetricIngressResourceCount = "k8s_ingress_resources" // L7
MetricIngressProxyCount = "k8s_ingress_proxies" // L3
MetricIngressResourceCount = "k8s_ingress_resources" // L7
MetricIngressPGResourceCount = "k8s_ingress_pg_resources" // L7 on ProxyGroup
MetricEgressProxyCount = "k8s_egress_proxies"
MetricConnectorResourceCount = "k8s_connector_resources"
MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"

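For illustration, the reconciler publishes the new metric from the set of Ingresses it currently manages (the set variable is an example; gaugePGIngressResources is created in ingress-for-pg.go with clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount)):

gaugePGIngressResources.Set(int64(managedPGIngresses.Len()))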