Mirror of https://github.com/tailscale/tailscale.git
cmd/k8s-operator: allow to optionally configure an HTTP endpoint for the HA Ingress (#14986)

Updates tailscale/corp#24795

Signed-off-by: Irbe Krumina <irbe@tailscale.com>

This commit is contained in commit e11ff28443 (parent 45f29a208a).
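
For context, the feature is opt-in per Ingress: setting the "tailscale.com/http-endpoint" annotation to "enabled" makes the ProxyGroup proxies serve the Ingress over plain HTTP on port 80 in addition to the default HTTPS endpoint on 443. Below is a minimal sketch of an Ingress that opts in, mirroring the objects constructed in the tests later in this patch; the names, ProxyGroup, and backend are illustrative placeholders, not values mandated by the operator.

```go
package main

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"tailscale.com/types/ptr"
)

// exampleIngress opts in to the new plain-HTTP endpoint via the
// tailscale.com/http-endpoint annotation; all names here are placeholders.
var exampleIngress = &networkingv1.Ingress{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "test-ingress",
		Namespace: "default",
		Annotations: map[string]string{
			"tailscale.com/proxy-group":   "test-pg",  // serve via this ProxyGroup
			"tailscale.com/http-endpoint": "enabled",  // also expose HTTP on port 80
		},
	},
	Spec: networkingv1.IngressSpec{
		IngressClassName: ptr.To("tailscale"),
		DefaultBackend: &networkingv1.IngressBackend{
			Service: &networkingv1.IngressServiceBackend{
				Name: "test",
				Port: networkingv1.ServiceBackendPort{Number: 8080},
			},
		},
	},
}
```
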
@@ -46,6 +46,9 @@ const (
     FinalizerNamePG = "tailscale.com/ingress-pg-finalizer"

     indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group"
+    // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as
+    // well as the default HTTPS endpoint).
+    annotationHTTPEndpoint = "tailscale.com/http-endpoint"
 )

 var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount)
@@ -202,16 +205,16 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
     // 3. Ensure that the serve config for the ProxyGroup contains the VIPService
     cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName)
     if err != nil {
-        return fmt.Errorf("error getting ingress serve config: %w", err)
+        return fmt.Errorf("error getting Ingress serve config: %w", err)
     }
     if cm == nil {
-        logger.Infof("no ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.")
+        logger.Infof("no Ingress serve config ConfigMap found, unable to update serve config. Ensure that ProxyGroup is healthy.")
         return nil
     }
     ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName))
     handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger)
     if err != nil {
-        return fmt.Errorf("failed to get handlers for ingress: %w", err)
+        return fmt.Errorf("failed to get handlers for Ingress: %w", err)
     }
     ingCfg := &ipn.ServiceConfig{
         TCP: map[uint16]*ipn.TCPPortHandler{
@@ -225,6 +228,19 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
             },
         },
     }
+
+    // Add HTTP endpoint if configured.
+    if isHTTPEndpointEnabled(ing) {
+        logger.Infof("exposing Ingress over HTTP")
+        epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName))
+        ingCfg.TCP[80] = &ipn.TCPPortHandler{
+            HTTP: true,
+        }
+        ingCfg.Web[epHTTP] = &ipn.WebServerConfig{
+            Handlers: handlers,
+        }
+    }
+
     var gotCfg *ipn.ServiceConfig
     if cfg != nil && cfg.Services != nil {
         gotCfg = cfg.Services[serviceName]
@@ -248,16 +264,23 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin
         tags = strings.Split(tstr, ",")
     }

+    vipPorts := []string{"443"} // always 443 for Ingress
+    if isHTTPEndpointEnabled(ing) {
+        vipPorts = append(vipPorts, "80")
+    }
+
     vipSvc := &VIPService{
         Name:    serviceName,
         Tags:    tags,
-        Ports:   []string{"443"}, // always 443 for Ingress
+        Ports:   vipPorts,
         Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID),
     }
     if existingVIPSvc != nil {
         vipSvc.Addrs = existingVIPSvc.Addrs
     }
-    if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) {
+    if existingVIPSvc == nil ||
+        !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) ||
+        !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) {
         logger.Infof("Ensuring VIPService %q exists and is up to date", hostname)
         if err := a.tsClient.createOrUpdateVIPService(ctx, vipSvc); err != nil {
             logger.Infof("error creating VIPService: %v", err)
@@ -267,16 +290,22 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin

     // 5. Update Ingress status
     oldStatus := ing.Status.DeepCopy()
-    // TODO(irbekrm): once we have ingress ProxyGroup, we can determine if instances are ready to route traffic to the VIPService
-    ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
-        {
-            Hostname: dnsName,
-            Ports: []networkingv1.IngressPortStatus{
-                {
-                    Protocol: "TCP",
-                    Port:     443,
-                },
-            },
+    ports := []networkingv1.IngressPortStatus{
+        {
+            Protocol: "TCP",
+            Port:     443,
+        },
+    }
+    if isHTTPEndpointEnabled(ing) {
+        ports = append(ports, networkingv1.IngressPortStatus{
+            Protocol: "TCP",
+            Port:     80,
+        })
+    }
+    ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
+        {
+            Hostname: dnsName,
+            Ports:    ports,
         },
     }
     if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) {
@@ -569,3 +598,11 @@ func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name
     }
     return nil
 }
+
+// isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet.
+func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool {
+    if ing == nil {
+        return false
+    }
+    return ing.Annotations[annotationHTTPEndpoint] == "enabled"
+}
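
Taken together, when the annotation is present the reconciler registers both an HTTPS handler on 443 and a plain-HTTP handler on 80 for the VIPService, and advertises both ports on the VIPService itself. A rough sketch of the per-service serve config this produces, assuming the field shapes used in the hunks above; the DNS name and handlers map are placeholders (in the reconciler they come from the Ingress and from handlersForIngress):

```go
package main

import "tailscale.com/ipn"

// sketchServeConfig shows the shape of the per-VIPService serve config the
// reconciler writes when the HTTP endpoint is enabled. dnsName and handlers
// are illustrative placeholders.
func sketchServeConfig(dnsName string, handlers map[string]*ipn.HTTPHandler) *ipn.ServiceConfig {
	return &ipn.ServiceConfig{
		TCP: map[uint16]*ipn.TCPPortHandler{
			443: {HTTPS: true}, // default HTTPS endpoint
			80:  {HTTP: true},  // added when tailscale.com/http-endpoint is "enabled"
		},
		Web: map[ipn.HostPort]*ipn.WebServerConfig{
			ipn.HostPort(dnsName + ":443"): {Handlers: handlers},
			ipn.HostPort(dnsName + ":80"):  {Handlers: handlers},
		},
	}
}
```

The hunks that follow are from the accompanying tests, which factor the common setup into a helper and add coverage for the HTTP endpoint.
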
@@ -8,6 +8,8 @@ package main
 import (
     "context"
     "encoding/json"
+    "maps"
+    "reflect"
     "testing"

     "slices"
@@ -18,81 +20,18 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/tools/record"
+    "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
     "tailscale.com/ipn"
     "tailscale.com/ipn/ipnstate"
     tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+    "tailscale.com/tailcfg"
     "tailscale.com/types/ptr"
 )

 func TestIngressPGReconciler(t *testing.T) {
-    tsIngressClass := &networkingv1.IngressClass{
-        ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
-        Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
-    }
-
-    // Pre-create the ProxyGroup
-    pg := &tsapi.ProxyGroup{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:       "test-pg",
-            Generation: 1,
-        },
-        Spec: tsapi.ProxyGroupSpec{
-            Type: tsapi.ProxyGroupTypeIngress,
-        },
-    }
-
-    // Pre-create the ConfigMap for the ProxyGroup
-    pgConfigMap := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "test-pg-ingress-config",
-            Namespace: "operator-ns",
-        },
-        BinaryData: map[string][]byte{
-            "serve-config.json": []byte(`{"Services":{}}`),
-        },
-    }
-
-    fc := fake.NewClientBuilder().
-        WithScheme(tsapi.GlobalScheme).
-        WithObjects(pg, pgConfigMap, tsIngressClass).
-        WithStatusSubresource(pg).
-        Build()
-    mustUpdateStatus(t, fc, "", pg.Name, func(pg *tsapi.ProxyGroup) {
-        pg.Status.Conditions = []metav1.Condition{
-            {
-                Type:               string(tsapi.ProxyGroupReady),
-                Status:             metav1.ConditionTrue,
-                ObservedGeneration: 1,
-            },
-        }
-    })
-    ft := &fakeTSClient{}
-    fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
-    zl, err := zap.NewDevelopment()
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    lc := &fakeLocalClient{
-        status: &ipnstate.Status{
-            CurrentTailnet: &ipnstate.TailnetStatus{
-                MagicDNSSuffix: "ts.net",
-            },
-        },
-    }
-    ingPGR := &IngressPGReconciler{
-        Client:      fc,
-        tsClient:    ft,
-        tsnetServer: fakeTsnetServer,
-        defaultTags: []string{"tag:k8s"},
-        tsNamespace: "operator-ns",
-        logger:      zl.Sugar(),
-        recorder:    record.NewFakeRecorder(10),
-        lc:          lc,
-    }
-
-    // Test 1: Default tags
+    ingPGR, fc, ft := setupIngressTest(t)
+
     ing := &networkingv1.Ingress{
         TypeMeta:   metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
         ObjectMeta: metav1.ObjectMeta{
@@ -122,8 +61,74 @@ func TestIngressPGReconciler(t *testing.T) {

     // Verify initial reconciliation
     expectReconciled(t, ingPGR, "default", "test-ingress")
+    verifyServeConfig(t, fc, "svc:my-svc", false)
+    verifyVIPService(t, ft, "svc:my-svc", []string{"443"})
+
+    mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
+        ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test"
+    })
+    expectReconciled(t, ingPGR, "default", "test-ingress")
+
+    // Verify VIPService uses custom tags
+    vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc")
+    if err != nil {
+        t.Fatalf("getting VIPService: %v", err)
+    }
+    if vipSvc == nil {
+        t.Fatal("VIPService not created")
+    }
+    wantTags := []string{"tag:custom", "tag:test"} // custom tags only
+    gotTags := slices.Clone(vipSvc.Tags)
+    slices.Sort(gotTags)
+    slices.Sort(wantTags)
+    if !slices.Equal(gotTags, wantTags) {
+        t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags)
+    }
+
+    // Create second Ingress
+    ing2 := &networkingv1.Ingress{
+        TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "my-other-ingress",
+            Namespace: "default",
+            UID:       types.UID("5678-UID"),
+            Annotations: map[string]string{
+                "tailscale.com/proxy-group": "test-pg",
+            },
+        },
+        Spec: networkingv1.IngressSpec{
+            IngressClassName: ptr.To("tailscale"),
+            DefaultBackend: &networkingv1.IngressBackend{
+                Service: &networkingv1.IngressServiceBackend{
+                    Name: "test",
+                    Port: networkingv1.ServiceBackendPort{
+                        Number: 8080,
+                    },
+                },
+            },
+            TLS: []networkingv1.IngressTLS{
+                {Hosts: []string{"my-other-svc.tailnetxyz.ts.net"}},
+            },
+        },
+    }
+    mustCreate(t, fc, ing2)
+
+    // Verify second Ingress reconciliation
+    expectReconciled(t, ingPGR, "default", "my-other-ingress")
+    verifyServeConfig(t, fc, "svc:my-other-svc", false)
+    verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"})
+
+    // Verify first Ingress is still working
+    verifyServeConfig(t, fc, "svc:my-svc", false)
+    verifyVIPService(t, ft, "svc:my-svc", []string{"443"})
+
+    // Delete second Ingress
+    if err := fc.Delete(context.Background(), ing2); err != nil {
+        t.Fatalf("deleting second Ingress: %v", err)
+    }
+    expectReconciled(t, ingPGR, "default", "my-other-ingress")
+
+    // Verify second Ingress cleanup
     cm := &corev1.ConfigMap{}
     if err := fc.Get(context.Background(), types.NamespacedName{
         Name: "test-pg-ingress-config",
@@ -137,46 +142,16 @@ func TestIngressPGReconciler(t *testing.T) {
         t.Fatalf("unmarshaling serve config: %v", err)
     }

+    // Verify first Ingress is still configured
     if cfg.Services["svc:my-svc"] == nil {
-        t.Error("expected serve config to contain VIPService configuration")
+        t.Error("first Ingress service config was incorrectly removed")
+    }
+    // Verify second Ingress was cleaned up
+    if cfg.Services["svc:my-other-svc"] != nil {
+        t.Error("second Ingress service config was not cleaned up")
     }

-    // Verify VIPService uses default tags
-    vipSvc, err := ft.getVIPService(context.Background(), "svc:my-svc")
-    if err != nil {
-        t.Fatalf("getting VIPService: %v", err)
-    }
-    if vipSvc == nil {
-        t.Fatal("VIPService not created")
-    }
-    wantTags := []string{"tag:k8s"} // default tags
-    if !slices.Equal(vipSvc.Tags, wantTags) {
-        t.Errorf("incorrect VIPService tags: got %v, want %v", vipSvc.Tags, wantTags)
-    }
-
-    // Test 2: Custom tags
-    mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
-        ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test"
-    })
-    expectReconciled(t, ingPGR, "default", "test-ingress")
-
-    // Verify VIPService uses custom tags
-    vipSvc, err = ft.getVIPService(context.Background(), "svc:my-svc")
-    if err != nil {
-        t.Fatalf("getting VIPService: %v", err)
-    }
-    if vipSvc == nil {
-        t.Fatal("VIPService not created")
-    }
-    wantTags = []string{"tag:custom", "tag:test"} // custom tags only
-    gotTags := slices.Clone(vipSvc.Tags)
-    slices.Sort(gotTags)
-    slices.Sort(wantTags)
-    if !slices.Equal(gotTags, wantTags) {
-        t.Errorf("incorrect VIPService tags: got %v, want %v", gotTags, wantTags)
-    }
-
-    // Delete the Ingress and verify cleanup
+    // Delete the first Ingress and verify cleanup
     if err := fc.Delete(context.Background(), ing); err != nil {
         t.Fatalf("deleting Ingress: %v", err)
     }
@@ -335,3 +310,233 @@ func TestValidateIngress(t *testing.T) {
         })
     }
 }
+
+func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) {
+    ingPGR, fc, ft := setupIngressTest(t)
+
+    // Create test Ingress with HTTP endpoint enabled
+    ing := &networkingv1.Ingress{
+        TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "test-ingress",
+            Namespace: "default",
+            UID:       types.UID("1234-UID"),
+            Annotations: map[string]string{
+                "tailscale.com/proxy-group":   "test-pg",
+                "tailscale.com/http-endpoint": "enabled",
+            },
+        },
+        Spec: networkingv1.IngressSpec{
+            IngressClassName: ptr.To("tailscale"),
+            DefaultBackend: &networkingv1.IngressBackend{
+                Service: &networkingv1.IngressServiceBackend{
+                    Name: "test",
+                    Port: networkingv1.ServiceBackendPort{
+                        Number: 8080,
+                    },
+                },
+            },
+            TLS: []networkingv1.IngressTLS{
+                {Hosts: []string{"my-svc"}},
+            },
+        },
+    }
+    if err := fc.Create(context.Background(), ing); err != nil {
+        t.Fatal(err)
+    }
+
+    // Verify initial reconciliation with HTTP enabled
+    expectReconciled(t, ingPGR, "default", "test-ingress")
+    verifyVIPService(t, ft, "svc:my-svc", []string{"80", "443"})
+    verifyServeConfig(t, fc, "svc:my-svc", true)
+
+    // Verify Ingress status
+    ing = &networkingv1.Ingress{}
+    if err := fc.Get(context.Background(), types.NamespacedName{
+        Name:      "test-ingress",
+        Namespace: "default",
+    }, ing); err != nil {
+        t.Fatal(err)
+    }
+
+    wantStatus := []networkingv1.IngressPortStatus{
+        {Port: 443, Protocol: "TCP"},
+        {Port: 80, Protocol: "TCP"},
+    }
+    if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) {
+        t.Errorf("incorrect status ports: got %v, want %v",
+            ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus)
+    }
+
+    // Remove HTTP endpoint annotation
+    mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
+        delete(ing.Annotations, "tailscale.com/http-endpoint")
+    })
+
+    // Verify reconciliation after removing HTTP
+    expectReconciled(t, ingPGR, "default", "test-ingress")
+    verifyVIPService(t, ft, "svc:my-svc", []string{"443"})
+    verifyServeConfig(t, fc, "svc:my-svc", false)
+
+    // Verify Ingress status
+    ing = &networkingv1.Ingress{}
+    if err := fc.Get(context.Background(), types.NamespacedName{
+        Name:      "test-ingress",
+        Namespace: "default",
+    }, ing); err != nil {
+        t.Fatal(err)
+    }
+
+    wantStatus = []networkingv1.IngressPortStatus{
+        {Port: 443, Protocol: "TCP"},
+    }
+    if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) {
+        t.Errorf("incorrect status ports: got %v, want %v",
+            ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus)
+    }
+}
+
+func verifyVIPService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) {
+    t.Helper()
+    vipSvc, err := ft.getVIPService(context.Background(), tailcfg.ServiceName(serviceName))
+    if err != nil {
+        t.Fatalf("getting VIPService %q: %v", serviceName, err)
+    }
+    if vipSvc == nil {
+        t.Fatalf("VIPService %q not created", serviceName)
+    }
+    gotPorts := slices.Clone(vipSvc.Ports)
+    slices.Sort(gotPorts)
+    slices.Sort(wantPorts)
+    if !slices.Equal(gotPorts, wantPorts) {
+        t.Errorf("incorrect ports for VIPService %q: got %v, want %v", serviceName, gotPorts, wantPorts)
+    }
+}
+
+func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantHTTP bool) {
+    t.Helper()
+
+    cm := &corev1.ConfigMap{}
+    if err := fc.Get(context.Background(), types.NamespacedName{
+        Name:      "test-pg-ingress-config",
+        Namespace: "operator-ns",
+    }, cm); err != nil {
+        t.Fatalf("getting ConfigMap: %v", err)
+    }
+
+    cfg := &ipn.ServeConfig{}
+    if err := json.Unmarshal(cm.BinaryData["serve-config.json"], cfg); err != nil {
+        t.Fatalf("unmarshaling serve config: %v", err)
+    }
+
+    t.Logf("Looking for service %q in config: %+v", serviceName, cfg)
+
+    svc := cfg.Services[tailcfg.ServiceName(serviceName)]
+    if svc == nil {
+        t.Fatalf("service %q not found in serve config, services: %+v", serviceName, maps.Keys(cfg.Services))
+    }
+
+    wantHandlers := 1
+    if wantHTTP {
+        wantHandlers = 2
+    }
+
+    // Check TCP handlers
+    if len(svc.TCP) != wantHandlers {
+        t.Errorf("incorrect number of TCP handlers for service %q: got %d, want %d", serviceName, len(svc.TCP), wantHandlers)
+    }
+    if wantHTTP {
+        if h, ok := svc.TCP[uint16(80)]; !ok {
+            t.Errorf("HTTP (port 80) handler not found for service %q", serviceName)
+        } else if !h.HTTP {
+            t.Errorf("HTTP not enabled for port 80 handler for service %q", serviceName)
+        }
+    }
+    if h, ok := svc.TCP[uint16(443)]; !ok {
+        t.Errorf("HTTPS (port 443) handler not found for service %q", serviceName)
+    } else if !h.HTTPS {
+        t.Errorf("HTTPS not enabled for port 443 handler for service %q", serviceName)
+    }
+
+    // Check Web handlers
+    if len(svc.Web) != wantHandlers {
+        t.Errorf("incorrect number of Web handlers for service %q: got %d, want %d", serviceName, len(svc.Web), wantHandlers)
+    }
+}
+
+func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) {
+    t.Helper()
+
+    tsIngressClass := &networkingv1.IngressClass{
+        ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
+        Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
+    }
+
+    // Pre-create the ProxyGroup
+    pg := &tsapi.ProxyGroup{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:       "test-pg",
+            Generation: 1,
+        },
+        Spec: tsapi.ProxyGroupSpec{
+            Type: tsapi.ProxyGroupTypeIngress,
+        },
+    }
+
+    // Pre-create the ConfigMap for the ProxyGroup
+    pgConfigMap := &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "test-pg-ingress-config",
+            Namespace: "operator-ns",
+        },
+        BinaryData: map[string][]byte{
+            "serve-config.json": []byte(`{"Services":{}}`),
+        },
+    }
+
+    fc := fake.NewClientBuilder().
+        WithScheme(tsapi.GlobalScheme).
+        WithObjects(pg, pgConfigMap, tsIngressClass).
+        WithStatusSubresource(pg).
+        Build()
+
+    // Set ProxyGroup status to ready
+    pg.Status.Conditions = []metav1.Condition{
+        {
+            Type:               string(tsapi.ProxyGroupReady),
+            Status:             metav1.ConditionTrue,
+            ObservedGeneration: 1,
+        },
+    }
+    if err := fc.Status().Update(context.Background(), pg); err != nil {
+        t.Fatal(err)
+    }
+
+    ft := &fakeTSClient{}
+    fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
+    zl, err := zap.NewDevelopment()
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    lc := &fakeLocalClient{
+        status: &ipnstate.Status{
+            CurrentTailnet: &ipnstate.TailnetStatus{
+                MagicDNSSuffix: "ts.net",
+            },
+        },
+    }
+
+    ingPGR := &IngressPGReconciler{
+        Client:      fc,
+        tsClient:    ft,
+        tsnetServer: fakeTsnetServer,
+        defaultTags: []string{"tag:k8s"},
+        tsNamespace: "operator-ns",
+        logger:      zl.Sugar(),
+        recorder:    record.NewFakeRecorder(10),
+        lc:          lc,
+    }
+
+    return ingPGR, fc, ft
+}