// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"fmt"
	"slices"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"tailscale.com/ipn"
	"tailscale.com/types/opt"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/set"
)

const (
	tailscaleIngressClassName      = "tailscale"                                   // ingressClass.metadata.name for the tailscale IngressClass resource
	tailscaleIngressControllerName = "tailscale.com/ts-ingress"                    // ingressClass.spec.controller for the tailscale IngressClass resource
	ingressClassDefaultAnnotation  = "ingressclass.kubernetes.io/is-default-class" // we do not support this; see https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class
)

type IngressReconciler struct {
	client.Client
	recorder record.EventRecorder
	ssr      *tailscaleSTSReconciler
	logger   *zap.SugaredLogger

	mu sync.Mutex // protects following
	// managedIngresses is a set of all ingress resources that we're currently
	// managing. This is only used for metrics.
	managedIngresses set.Slice[types.UID]
}

var (
	// gaugeIngressResources tracks the number of ingress resources that we're
	// currently managing.
	gaugeIngressResources = clientmetric.NewGauge("k8s_ingress_resources")
)
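
// Reconcile is the entry point of the reconcile loop for Ingress resources.
// It fetches the Ingress named in the request and either cleans up any
// Tailscale resources created for it (if it is being deleted or no longer
// specifies the tailscale IngressClass) or provisions a proxy that exposes
// it over tailscale.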
func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
	logger := a.logger.With("ingress-ns", req.Namespace, "ingress-name", req.Name)
	logger.Debugf("starting reconcile")
	defer logger.Debugf("reconcile finished")

	ing := new(networkingv1.Ingress)
	err = a.Get(ctx, req.NamespacedName, ing)
	if apierrors.IsNotFound(err) {
		// Request object not found, could have been deleted after reconcile request.
		logger.Debugf("ingress not found, assuming it was deleted")
		return reconcile.Result{}, nil
	} else if err != nil {
		return reconcile.Result{}, fmt.Errorf("failed to get ingress: %w", err)
	}
	if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) {
		logger.Debugf("ingress is being deleted or should not be exposed, cleaning up")
		return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing)
	}

	return reconcile.Result{}, a.maybeProvision(ctx, logger, ing)
}
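
// maybeCleanup removes any resources created for exposing ing over tailscale
// and, once all of them are gone, removes the tailscale finalizer from ing so
// that future reconciles exit early.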
func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error {
	ix := slices.Index(ing.Finalizers, FinalizerName)
	if ix < 0 {
		logger.Debugf("no finalizer, nothing to do")
		a.mu.Lock()
		defer a.mu.Unlock()
		a.managedIngresses.Remove(ing.UID)
		gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
		return nil
	}

	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress")); err != nil {
		return fmt.Errorf("failed to cleanup: %w", err)
	} else if !done {
		logger.Debugf("cleanup not done yet, waiting for next reconcile")
		return nil
	}

	ing.Finalizers = append(ing.Finalizers[:ix], ing.Finalizers[ix+1:]...)
	if err := a.Update(ctx, ing); err != nil {
		return fmt.Errorf("failed to remove finalizer: %w", err)
	}

	// Unlike most log entries in the reconcile loop, this will get printed
	// exactly once at the very end of cleanup, because the final step of
	// cleanup removes the tailscale finalizer, which will make all future
	// reconciles exit early.
	logger.Infof("unexposed ingress from tailnet")

	a.mu.Lock()
	defer a.mu.Unlock()
	a.managedIngresses.Remove(ing.UID)
	gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
	return nil
}
// maybeProvision ensures that ing is exposed over tailscale, taking any actions
// necessary to reach that state.
//
// This function adds a finalizer to ing, ensuring that we can handle orderly
// deprovisioning later.
func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error {
	if err := a.validateIngressClass(ctx); err != nil {
		logger.Warnf("error validating tailscale IngressClass: %v. In the future this might be a terminal error.", err)
	}
	if !slices.Contains(ing.Finalizers, FinalizerName) {
		// This log line is printed exactly once during initial provisioning,
		// because once the finalizer is in place this block gets skipped. So,
		// this is a nice place to tell the operator that the high level,
		// multi-reconcile operation is underway.
		logger.Infof("exposing ingress over tailscale")
		ing.Finalizers = append(ing.Finalizers, FinalizerName)
		if err := a.Update(ctx, ing); err != nil {
			return fmt.Errorf("failed to add finalizer: %w", err)
		}
	}
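
	// If the Ingress references a ProxyClass, wait until that ProxyClass has
	// been validated (is Ready) before provisioning the proxy resources.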
	proxyClass := proxyClassForObject(ing)
	if proxyClass != "" {
		if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil {
			return fmt.Errorf("error verifying ProxyClass for Ingress: %w", err)
		} else if !ready {
			logger.Infof("ProxyClass %s specified for the Ingress, but is not (yet) Ready, waiting...", proxyClass)
			return nil
		}
	}

	a.mu.Lock()
	a.managedIngresses.Add(ing.UID)
	gaugeIngressResources.Set(int64(a.managedIngresses.Len()))
	a.mu.Unlock()

	if !a.ssr.IsHTTPSEnabledOnTailnet() {
		a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work")
	}

	// magic443 is a fake hostname that we can use to tell containerboot to swap
	// out with the real hostname once it's known.
	const magic443 = "${TS_CERT_DOMAIN}:443"
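	// sc is the serve config that will be applied to the proxy: terminate TLS
	// on port 443 and proxy requests to the cluster Services referenced by
	// the Ingress backends.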
	sc := &ipn.ServeConfig{
		TCP: map[uint16]*ipn.TCPPortHandler{
			443: {
				HTTPS: true,
			},
		},
		Web: map[ipn.HostPort]*ipn.WebServerConfig{
			magic443: {
				Handlers: map[string]*ipn.HTTPHandler{},
			},
		},
	}
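
	// If the Ingress has the funnel annotation set to "true", also expose the
	// proxy on Tailscale Funnel (i.e. to the public internet).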
	if opt.Bool(ing.Annotations[AnnotationFunnel]).EqualBool(true) {
		sc.AllowFunnel = map[ipn.HostPort]bool{
			magic443: true,
		}
	}

	web := sc.Web[magic443]
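
	// addIngressBackend resolves an Ingress backend to the ClusterIP and port
	// of its Service and registers a proxy handler for it in the serve
	// config. Invalid backends are reported as warning Events on the Ingress
	// and skipped.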
	addIngressBackend := func(b *networkingv1.IngressBackend, path string) {
		if b == nil {
			return
		}
		if b.Service == nil {
			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q is missing service", path)
			return
		}
		var svc corev1.Service
		if err := a.Get(ctx, types.NamespacedName{Namespace: ing.Namespace, Name: b.Service.Name}, &svc); err != nil {
			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "failed to get service %q for path %q: %v", b.Service.Name, path, err)
			return
		}
		if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid ClusterIP", path)
			return
		}
		var port int32
		if b.Service.Port.Name != "" {
			for _, p := range svc.Spec.Ports {
				if p.Name == b.Service.Port.Name {
					port = p.Port
					break
				}
			}
		} else {
			port = b.Service.Port.Number
		}
		if port == 0 {
			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "backend for path %q has invalid port", path)
			return
		}
		proto := "http://"
		if port == 443 || b.Service.Port.Name == "https" {
			proto = "https+insecure://"
		}
		web.Handlers[path] = &ipn.HTTPHandler{
			Proxy: proto + svc.Spec.ClusterIP + ":" + fmt.Sprint(port) + path,
		}
	}
	addIngressBackend(ing.Spec.DefaultBackend, "/")

	var tlsHost string // hostname or FQDN or empty
	if len(ing.Spec.TLS) > 0 && len(ing.Spec.TLS[0].Hosts) > 0 {
		tlsHost = ing.Spec.TLS[0].Hosts[0]
	}

	for _, rule := range ing.Spec.Rules {
		// Host is optional, but if it's present it must match the TLS host;
		// otherwise we ignore the rule.
		if rule.Host != "" && rule.Host != tlsHost {
			a.recorder.Eventf(ing, corev1.EventTypeWarning, "InvalidIngressBackend", "rule with host %q ignored, unsupported", rule.Host)
			continue
		}
		if rule.HTTP == nil {
			continue
		}
		for _, p := range rule.HTTP.Paths {
			// Send a warning if folks use the Exact path type, to make it
			// easier for us to support Exact path type matching in the
			// future if needed.
			// https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
			if *p.PathType == networkingv1.PathTypeExact {
				msg := "Exact path type strict matching is currently not supported and requests will be routed as for Prefix path type. This behaviour might change in the future."
				logger.Warnf("unsupported path type Exact for path %s. %s", p.Path, msg)
				a.recorder.Eventf(ing, corev1.EventTypeWarning, "UnsupportedPathTypeExact", msg)
			}
			addIngressBackend(&p.Backend, p.Path)
		}
	}

	if len(web.Handlers) == 0 {
		logger.Warn("Ingress contains no valid backends")
		a.recorder.Eventf(ing, corev1.EventTypeWarning, "NoValidBackends", "no valid backends")
		return nil
	}

	crl := childResourceLabels(ing.Name, ing.Namespace, "ingress")
	var tags []string
	if tstr, ok := ing.Annotations[AnnotationTags]; ok {
		tags = strings.Split(tstr, ",")
	}

	hostname := ing.Namespace + "-" + ing.Name + "-ingress"
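	// If the Ingress specifies a TLS host, use its first DNS label as the
	// proxy's hostname instead of the default derived above from the Ingress
	// namespace and name.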
	if tlsHost != "" {
		hostname, _, _ = strings.Cut(tlsHost, ".")
	}

	sts := &tailscaleSTSConfig{
		Hostname:            hostname,
		ParentResourceName:  ing.Name,
		ParentResourceUID:   string(ing.UID),
		ServeConfig:         sc,
		Tags:                tags,
		ChildResourceLabels: crl,
		ProxyClass:          proxyClass,
	}

	if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
		sts.ForwardClusterTrafficViaL7IngressProxy = true
	}

	if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
		return fmt.Errorf("failed to provision: %w", err)
	}

	_, tsHost, _, err := a.ssr.DeviceInfo(ctx, crl)
	if err != nil {
		return fmt.Errorf("failed to get device info: %w", err)
	}
	if tsHost == "" {
		logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth")
		// No hostname yet. Wait for the proxy pod to auth.
		ing.Status.LoadBalancer.Ingress = nil
		if err := a.Status().Update(ctx, ing); err != nil {
			return fmt.Errorf("failed to update ingress status: %w", err)
		}
		return nil
	}
	logger.Debugf("setting ingress hostname to %q", tsHost)
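	// Advertise the proxy's tailnet hostname and port 443 in the Ingress
	// status so that clients and tooling can see where the Ingress is served.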
	ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
		{
			Hostname: tsHost,
			Ports: []networkingv1.IngressPortStatus{
				{
					Protocol: "TCP",
					Port:     443,
				},
			},
		},
	}
	if err := a.Status().Update(ctx, ing); err != nil {
		return fmt.Errorf("failed to update ingress status: %w", err)
	}
	return nil
}
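
// shouldExpose reports whether ing should be exposed over tailscale, i.e.
// whether it specifies the tailscale IngressClass.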
func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool {
	return ing != nil &&
		ing.Spec.IngressClassName != nil &&
		*ing.Spec.IngressClassName == tailscaleIngressClassName
}

// validateIngressClass attempts to validate that the 'tailscale' IngressClass
// included in the Tailscale installation manifests exists and has not been
// modified to enable features that we do not support.
func (a *IngressReconciler) validateIngressClass(ctx context.Context) error {
	ic := &networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: tailscaleIngressClassName,
		},
	}
	if err := a.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) {
		return errors.New("Tailscale IngressClass not found in cluster. Latest installation manifests include a tailscale IngressClass - please update")
	} else if err != nil {
		return fmt.Errorf("error retrieving 'tailscale' IngressClass: %w", err)
	}
	if ic.Spec.Controller != tailscaleIngressControllerName {
		return fmt.Errorf("tailscale IngressClass controller name %s does not match the tailscale Ingress controller name %s. Ensure that you are using the 'tailscale' IngressClass from the latest Tailscale installation manifests", ic.Spec.Controller, tailscaleIngressControllerName)
	}
	if ic.GetAnnotations()[ingressClassDefaultAnnotation] != "" {
		return fmt.Errorf("%s annotation is set on the 'tailscale' IngressClass, but the Tailscale Ingress controller does not support default Ingress classes. Ensure that you are using the 'tailscale' IngressClass from the latest Tailscale installation manifests", ingressClassDefaultAnnotation)
	}
	return nil
}