cmd/{k8s-operator,k8s-proxy}: add kube-apiserver ProxyGroup type (#16266)
Adds a new k8s-proxy command to convert the operator's in-process proxy into
a separately deployable ProxyGroup type: kube-apiserver. k8s-proxy
reads in a new config file written by the operator, modelled on tailscaled's
conffile but with some modifications to ensure multiple versions of the
config can co-exist within a file. This should make it much easier to
support reading that config file from a Kube Secret with a stable file name.
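For context, a minimal sketch of the per-replica config the operator marshals
for kube-apiserver ProxyGroups, based on the conf.VersionedConfig usage in
cmd/k8s-operator/proxygroup.go below (hostname and state values here are
illustrative, not defaults):

    // Sketch only: one versioned config, written under the stable key
    // "config.hujson" in the per-replica config Secret.
    cfg := conf.VersionedConfig{
        Version: "v1alpha1",
        ConfigV1Alpha1: &conf.ConfigV1Alpha1{
            Hostname: ptr.To("pg-0"),      // illustrative
            State:    ptr.To("kube:pg-0"), // illustrative
            App:      ptr.To(kubetypes.AppProxyGroupKubeAPIServer),
            KubeAPIServer: &conf.KubeAPIServer{
                AuthMode: opt.NewBool(true),
            },
        },
    }
    cfgB, err := json.Marshal(cfg)

Because each version of the config lives under its own key in the same file,
new config versions can be added later without renaming the file the proxy
reads from.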
To avoid needing to give the operator ClusterRole{,Binding} permissions,
the helm chart now optionally deploys a new static ServiceAccount for
the API Server proxy to use if in auth mode.
Proxies deployed by kube-apiserver ProxyGroups currently work the same way as
the operator's in-process proxy. They do not yet leverage Tailscale Services
for presenting a single HA DNS name.
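For illustration, a kube-apiserver ProxyGroup can be declared as in the test
fixture at the end of this commit (the object name here is illustrative):

    pg := &tsapi.ProxyGroup{
        ObjectMeta: metav1.ObjectMeta{Name: "api-proxies"},
        Spec: tsapi.ProxyGroupSpec{
            Type:     tsapi.ProxyGroupTypeKubernetesAPIServer,
            Replicas: ptr.To[int32](2),
            KubeAPIServer: &tsapi.KubeAPIServerConfig{
                // auth is the default mode; noauth skips tailnet-identity
                // impersonation when proxying to the API server.
                Mode: ptr.To(tsapi.APIServerProxyModeNoAuth),
            },
        },
    }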
Updates #13358
Change-Id: Ib6ead69b2173c5e1929f3c13fb48a9a5362195d8
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
@@ -200,7 +200,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
 	github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp
 	github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+
 	github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp
-	github.com/tailscale/hujson from tailscale.com/ipn/conffile
+	github.com/tailscale/hujson from tailscale.com/ipn/conffile+
 	L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+
 	L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink
 	github.com/tailscale/peercred from tailscale.com/ipn/ipnauth
@@ -822,6 +822,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
 	tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording
 	tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator
 	tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator
+	tailscale.com/kube/k8s-proxy/conf from tailscale.com/cmd/k8s-operator
 	tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+
 	tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore
 	tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+
@@ -1,7 +1,16 @@
 # Copyright (c) Tailscale Inc & AUTHORS
 # SPDX-License-Identifier: BSD-3-Clause

-{{ if eq .Values.apiServerProxyConfig.mode "true" }}
+# If old setting used, enable both old (operator) and new (ProxyGroup) workflows.
+# If new setting used, enable only new workflow.
+{{ if or (eq .Values.apiServerProxyConfig.mode "true")
+         (eq .Values.apiServerProxyConfig.allowImpersonation "true") }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-apiserver-auth-proxy
+  namespace: {{ .Release.Namespace }}
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -16,9 +25,14 @@ kind: ClusterRoleBinding
 metadata:
   name: tailscale-auth-proxy
 subjects:
+{{- if eq .Values.apiServerProxyConfig.mode "true" }}
 - kind: ServiceAccount
   name: operator
   namespace: {{ .Release.Namespace }}
+{{- end }}
+- kind: ServiceAccount
+  name: kube-apiserver-auth-proxy
+  namespace: {{ .Release.Namespace }}
 roleRef:
   kind: ClusterRole
   name: tailscale-auth-proxy
@@ -92,6 +92,13 @@ ingressClass:
 # If you need more configuration options, take a look at ProxyClass:
 # https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource
 proxyConfig:
+  # Configure the proxy image to use instead of the default tailscale/tailscale:latest.
+  # Applying a ProxyClass with `spec.statefulSet.pod.tailscaleContainer.image`
+  # set will override any defaults here.
+  #
+  # Note that ProxyGroups of type "kube-apiserver" use a different default image,
+  # tailscale/k8s-proxy:latest, and it is currently only possible to override
+  # that image via the same ProxyClass field.
   image:
     # Repository defaults to DockerHub, but images are also synced to ghcr.io/tailscale/tailscale.
     repository: tailscale/tailscale
@@ -115,6 +122,15 @@ proxyConfig:
 # Kubernetes API server.
 # https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy
 apiServerProxyConfig:
+  # Set to "true" to create the ClusterRole permissions required for the API
+  # server proxy's auth mode. In auth mode, the API server proxy impersonates
+  # groups and users based on tailnet ACL grants. Required for ProxyGroups of
+  # type "kube-apiserver" running in auth mode.
+  allowImpersonation: "false" # "true", "false"
+
+  # If true or noauth, the operator will run an in-process API server proxy.
+  # You can deploy a ProxyGroup of type "kube-apiserver" to run a high
+  # availability set of API server proxies instead.
   mode: "false" # "true", "false", "noauth"

 imagePullSecrets: []
@@ -1379,12 +1379,21 @@ spec:
                 type: string
               image:
                 description: |-
-                  Container image name. By default images are pulled from
-                  docker.io/tailscale/tailscale, but the official images are also
-                  available at ghcr.io/tailscale/tailscale. Specifying image name here
-                  will override any proxy image values specified via the Kubernetes
-                  operator's Helm chart values or PROXY_IMAGE env var in the operator
-                  Deployment.
+                  Container image name. By default images are pulled from docker.io/tailscale,
+                  but the official images are also available at ghcr.io/tailscale.
+
+                  For all uses except on ProxyGroups of type "kube-apiserver", this image must
+                  be either tailscale/tailscale, or an equivalent mirror of that image.
+                  To apply to ProxyGroups of type "kube-apiserver", this image must be
+                  tailscale/k8s-proxy or a mirror of that image.
+
+                  For "tailscale/tailscale"-based proxies, specifying image name here will
+                  override any proxy image values specified via the Kubernetes operator's
+                  Helm chart values or PROXY_IMAGE env var in the operator Deployment.
+                  For "tailscale/k8s-proxy"-based proxies, there is currently no way to
+                  configure your own default, and this field is the only way to use a
+                  custom image.
+
                   https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
                 type: string
               imagePullPolicy:
@@ -1655,7 +1664,9 @@ spec:
                   PodSecurityContext, the value specified in SecurityContext takes precedence.
                 type: string
               tailscaleInitContainer:
-                description: Configuration for the proxy init container that enables forwarding.
+                description: |-
+                  Configuration for the proxy init container that enables forwarding.
+                  Not valid to apply to ProxyGroups of type "kube-apiserver".
                 type: object
                 properties:
                   debug:
@@ -1709,12 +1720,21 @@ spec:
                 type: string
               image:
                 description: |-
-                  Container image name. By default images are pulled from
-                  docker.io/tailscale/tailscale, but the official images are also
-                  available at ghcr.io/tailscale/tailscale. Specifying image name here
-                  will override any proxy image values specified via the Kubernetes
-                  operator's Helm chart values or PROXY_IMAGE env var in the operator
-                  Deployment.
+                  Container image name. By default images are pulled from docker.io/tailscale,
+                  but the official images are also available at ghcr.io/tailscale.
+
+                  For all uses except on ProxyGroups of type "kube-apiserver", this image must
+                  be either tailscale/tailscale, or an equivalent mirror of that image.
+                  To apply to ProxyGroups of type "kube-apiserver", this image must be
+                  tailscale/k8s-proxy or a mirror of that image.
+
+                  For "tailscale/tailscale"-based proxies, specifying image name here will
+                  override any proxy image values specified via the Kubernetes operator's
+                  Helm chart values or PROXY_IMAGE env var in the operator Deployment.
+                  For "tailscale/k8s-proxy"-based proxies, there is currently no way to
+                  configure your own default, and this field is the only way to use a
+                  custom image.
+
                   https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
                 type: string
               imagePullPolicy:
@@ -77,6 +77,22 @@ spec:
                 must not start with a dash and must be between 1 and 62 characters long.
               type: string
               pattern: ^[a-z0-9][a-z0-9-]{0,61}$
+            kubeAPIServer:
+              description: |-
+                KubeAPIServer contains configuration specific to the kube-apiserver
+                ProxyGroup type. This field is only used when Type is set to "kube-apiserver".
+              type: object
+              properties:
+                mode:
+                  description: |-
+                    Mode to run the API server proxy in. Supported modes are auth and noauth.
+                    In auth mode, requests from the tailnet proxied over to the Kubernetes
+                    API server are additionally impersonated using the sender's tailnet identity.
+                    If not specified, defaults to auth mode.
+                  type: string
+                  enum:
+                    - auth
+                    - noauth
             proxyClass:
               description: |-
                 ProxyClass is the name of the ProxyClass custom resource that contains
@@ -106,12 +122,13 @@ spec:
               pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$
             type:
               description: |-
-                Type of the ProxyGroup proxies. Supported types are egress and ingress.
+                Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver.
                 Type is immutable once a ProxyGroup is created.
               type: string
               enum:
                 - egress
                 - ingress
+                - kube-apiserver
               x-kubernetes-validations:
                 - rule: self == oldSelf
                   message: ProxyGroup type is immutable
@@ -1,6 +1,12 @@
 # Copyright (c) Tailscale Inc & AUTHORS
 # SPDX-License-Identifier: BSD-3-Clause

+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-apiserver-auth-proxy
+  namespace: tailscale
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -18,6 +24,9 @@ subjects:
 - kind: ServiceAccount
   name: operator
   namespace: tailscale
+- kind: ServiceAccount
+  name: kube-apiserver-auth-proxy
+  namespace: tailscale
 roleRef:
   kind: ClusterRole
   name: tailscale-auth-proxy
@@ -1852,12 +1852,21 @@ spec:
                 type: array
               image:
                 description: |-
-                  Container image name. By default images are pulled from
-                  docker.io/tailscale/tailscale, but the official images are also
-                  available at ghcr.io/tailscale/tailscale. Specifying image name here
-                  will override any proxy image values specified via the Kubernetes
-                  operator's Helm chart values or PROXY_IMAGE env var in the operator
-                  Deployment.
+                  Container image name. By default images are pulled from docker.io/tailscale,
+                  but the official images are also available at ghcr.io/tailscale.
+
+                  For all uses except on ProxyGroups of type "kube-apiserver", this image must
+                  be either tailscale/tailscale, or an equivalent mirror of that image.
+                  To apply to ProxyGroups of type "kube-apiserver", this image must be
+                  tailscale/k8s-proxy or a mirror of that image.
+
+                  For "tailscale/tailscale"-based proxies, specifying image name here will
+                  override any proxy image values specified via the Kubernetes operator's
+                  Helm chart values or PROXY_IMAGE env var in the operator Deployment.
+                  For "tailscale/k8s-proxy"-based proxies, there is currently no way to
+                  configure your own default, and this field is the only way to use a
+                  custom image.
+
                   https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
                 type: string
               imagePullPolicy:
@@ -2129,7 +2138,9 @@ spec:
                     type: object
                 type: object
               tailscaleInitContainer:
-                description: Configuration for the proxy init container that enables forwarding.
+                description: |-
+                  Configuration for the proxy init container that enables forwarding.
+                  Not valid to apply to ProxyGroups of type "kube-apiserver".
                 properties:
                   debug:
                     description: |-
@@ -2182,12 +2193,21 @@ spec:
                 type: array
               image:
                 description: |-
-                  Container image name. By default images are pulled from
-                  docker.io/tailscale/tailscale, but the official images are also
-                  available at ghcr.io/tailscale/tailscale. Specifying image name here
-                  will override any proxy image values specified via the Kubernetes
-                  operator's Helm chart values or PROXY_IMAGE env var in the operator
-                  Deployment.
+                  Container image name. By default images are pulled from docker.io/tailscale,
+                  but the official images are also available at ghcr.io/tailscale.
+
+                  For all uses except on ProxyGroups of type "kube-apiserver", this image must
+                  be either tailscale/tailscale, or an equivalent mirror of that image.
+                  To apply to ProxyGroups of type "kube-apiserver", this image must be
+                  tailscale/k8s-proxy or a mirror of that image.
+
+                  For "tailscale/tailscale"-based proxies, specifying image name here will
+                  override any proxy image values specified via the Kubernetes operator's
+                  Helm chart values or PROXY_IMAGE env var in the operator Deployment.
+                  For "tailscale/k8s-proxy"-based proxies, there is currently no way to
+                  configure your own default, and this field is the only way to use a
+                  custom image.
+
                   https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
                 type: string
               imagePullPolicy:
@@ -2904,6 +2924,22 @@ spec:
                 must not start with a dash and must be between 1 and 62 characters long.
                 pattern: ^[a-z0-9][a-z0-9-]{0,61}$
                 type: string
+              kubeAPIServer:
+                description: |-
+                  KubeAPIServer contains configuration specific to the kube-apiserver
+                  ProxyGroup type. This field is only used when Type is set to "kube-apiserver".
+                properties:
+                  mode:
+                    description: |-
+                      Mode to run the API server proxy in. Supported modes are auth and noauth.
+                      In auth mode, requests from the tailnet proxied over to the Kubernetes
+                      API server are additionally impersonated using the sender's tailnet identity.
+                      If not specified, defaults to auth mode.
+                    enum:
+                      - auth
+                      - noauth
+                    type: string
+                type: object
               proxyClass:
                 description: |-
                   ProxyClass is the name of the ProxyClass custom resource that contains
@@ -2933,11 +2969,12 @@ spec:
                 type: array
               type:
                 description: |-
-                  Type of the ProxyGroup proxies. Supported types are egress and ingress.
+                  Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver.
                   Type is immutable once a ProxyGroup is created.
                 enum:
                   - egress
                   - ingress
+                  - kube-apiserver
                 type: string
                 x-kubernetes-validations:
                   - message: ProxyGroup type is immutable
@@ -239,7 +239,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin
 	// This checks and ensures that Tailscale Service's owner references are updated
 	// for this Ingress and errors if that is not possible (i.e. because it
 	// appears that the Tailscale Service has been created by a non-operator actor).
-	updatedAnnotations, err := r.ownerAnnotations(existingTSSvc)
+	updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc)
 	if err != nil {
 		const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition"
 		msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr)
@@ -867,9 +867,9 @@ type OwnerRef struct {
 // nil, but does not contain an owner reference we return an error as this likely means
 // that the Service was created by somthing other than a Tailscale
 // Kubernetes operator.
-func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) {
+func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string]string, error) {
 	ref := OwnerRef{
-		OperatorID: r.operatorID,
+		OperatorID: operatorID,
 	}
 	if svc == nil {
 		c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
@@ -12,8 +12,10 @@ import (
 	"maps"
 	"reflect"
 	"slices"
+	"strings"
 	"testing"

+	"github.com/google/go-cmp/cmp"
 	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
@@ -650,6 +652,53 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) {
 	}
 }

+func TestOwnerAnnotations(t *testing.T) {
+	singleSelfOwner := map[string]string{
+		ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`,
+	}
+
+	for name, tc := range map[string]struct {
+		svc             *tailscale.VIPService
+		wantAnnotations map[string]string
+		wantErr         string
+	}{
+		"no_svc": {
+			svc:             nil,
+			wantAnnotations: singleSelfOwner,
+		},
+		"empty_svc": {
+			svc:     &tailscale.VIPService{},
+			wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator",
+		},
+		"already_owner": {
+			svc: &tailscale.VIPService{
+				Annotations: singleSelfOwner,
+			},
+			wantAnnotations: singleSelfOwner,
+		},
+		"add_owner": {
+			svc: &tailscale.VIPService{
+				Annotations: map[string]string{
+					ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`,
+				},
+			},
+			wantAnnotations: map[string]string{
+				ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`,
+			},
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			got, err := ownerAnnotations("self-id", tc.svc)
+			if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) {
+				t.Errorf("ownerAnnotations() error = %v, wantErr %v", err, tc.wantErr)
+			}
+			if diff := cmp.Diff(tc.wantAnnotations, got); diff != "" {
+				t.Errorf("ownerAnnotations() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
 func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error {
 	secret := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
@@ -26,6 +26,7 @@ import (
 	networkingv1 "k8s.io/api/networking/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	klabels "k8s.io/apimachinery/pkg/labels"
@@ -77,6 +78,7 @@ func main() {
 		tsNamespace       = defaultEnv("OPERATOR_NAMESPACE", "")
 		tslogging         = defaultEnv("OPERATOR_LOGGING", "info")
 		image             = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
+		k8sProxyImage     = defaultEnv("K8S_PROXY_IMAGE", "tailscale/k8s-proxy:latest")
 		priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
 		tags              = defaultEnv("PROXY_TAGS", "tag:k8s")
 		tsFirewallMode    = defaultEnv("PROXY_FIREWALL_MODE", "")
@@ -110,17 +112,27 @@ func main() {
 	// The operator can run either as a plain operator or it can
 	// additionally act as api-server proxy
 	// https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy.
-	mode := apiproxy.ParseAPIProxyMode()
-	if mode == apiproxy.APIServerProxyModeDisabled {
+	mode := parseAPIProxyMode()
+	if mode == apiServerProxyModeDisabled {
 		hostinfo.SetApp(kubetypes.AppOperator)
 	} else {
-		hostinfo.SetApp(kubetypes.AppAPIServerProxy)
+		hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy)
 	}

 	s, tsc := initTSNet(zlog, loginServer)
 	defer s.Close()
 	restConfig := config.GetConfigOrDie()
-	apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode)
+	if mode != apiServerProxyModeDisabled {
+		ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled)
+		if err != nil {
+			zlog.Fatalf("error creating API server proxy: %v", err)
+		}
+		go func() {
+			if err := ap.Run(context.Background()); err != nil {
+				zlog.Fatalf("error running API server proxy: %v", err)
+			}
+		}()
+	}
 	rOpts := reconcilerOpts{
 		log:      zlog,
 		tsServer: s,
@@ -128,6 +140,7 @@ func main() {
 		tailscaleNamespace:            tsNamespace,
 		restConfig:                    restConfig,
 		proxyImage:                    image,
+		k8sProxyImage:                 k8sProxyImage,
 		proxyPriorityClassName:        priorityClassName,
 		proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer,
 		proxyTags:                     tags,
@@ -415,7 +428,6 @@ func runReconcilers(opts reconcilerOpts) {
 		Complete(&HAServiceReconciler{
 			recorder:    eventRecorder,
 			tsClient:    opts.tsClient,
-			tsnetServer: opts.tsServer,
 			defaultTags: strings.Split(opts.proxyTags, ","),
 			Client:      mgr.GetClient(),
 			logger:      opts.log.Named("service-pg-reconciler"),
@@ -625,13 +637,14 @@ func runReconcilers(opts reconcilerOpts) {
 	ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{})
 	proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog))
 	nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog))
+	saFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(serviceAccountHandlerForProxyGroup(mgr.GetClient(), startlog))
 	err = builder.ControllerManagedBy(mgr).
 		For(&tsapi.ProxyGroup{}).
 		Named("proxygroup-reconciler").
 		Watches(&corev1.Service{}, ownedByProxyGroupFilter).
 		Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter).
 		Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter).
-		Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter).
+		Watches(&corev1.ServiceAccount{}, saFilterForProxyGroup).
 		Watches(&corev1.Secret{}, ownedByProxyGroupFilter).
 		Watches(&rbacv1.Role{}, ownedByProxyGroupFilter).
 		Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter).
@@ -645,7 +658,8 @@ func runReconcilers(opts reconcilerOpts) {
 		tsClient:          opts.tsClient,
 		tsNamespace:       opts.tailscaleNamespace,
-		proxyImage:        opts.proxyImage,
+		tsProxyImage:      opts.proxyImage,
+		k8sProxyImage:     opts.k8sProxyImage,
 		defaultTags:       strings.Split(opts.proxyTags, ","),
 		tsFirewallMode:    opts.proxyFirewallMode,
 		defaultProxyClass: opts.defaultProxyClass,
@@ -668,6 +682,7 @@ type reconcilerOpts struct {
 	tailscaleNamespace string       // namespace in which operator resources will be deployed
 	restConfig         *rest.Config // config for connecting to the kube API server
 	proxyImage         string       // <proxy-image-repo>:<proxy-image-tag>
+	k8sProxyImage      string       // <k8s-proxy-image-repo>:<k8s-proxy-image-tag>
 	// proxyPriorityClassName isPriorityClass to be set for proxy Pods. This
 	// is a legacy mechanism for cluster resource configuration options -
 	// going forward use ProxyClass.
@@ -996,8 +1011,8 @@ func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger
 }

 // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass,
-// returns a list of reconcile requests for all Connectors that have
-// .spec.proxyClass set.
+// returns a list of reconcile requests for all ProxyGroups that have
+// .spec.proxyClass set to that ProxyClass.
 func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
 	return func(ctx context.Context, o client.Object) []reconcile.Request {
 		pgList := new(tsapi.ProxyGroupList)
@@ -1016,6 +1031,37 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger)
 	}
 }

+// serviceAccountHandlerForProxyGroup returns a handler that, for a given ServiceAccount,
+// returns a list of reconcile requests for all ProxyGroups that use that ServiceAccount.
+// For most ProxyGroups, this will be a dedicated ServiceAccount owned by a specific
+// ProxyGroup. But for kube-apiserver ProxyGroups running in auth mode, they use a shared
+// static ServiceAccount named "kube-apiserver-auth-proxy".
+func serviceAccountHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
+	return func(ctx context.Context, o client.Object) []reconcile.Request {
+		pgList := new(tsapi.ProxyGroupList)
+		if err := cl.List(ctx, pgList); err != nil {
+			logger.Debugf("error listing ProxyGroups for ServiceAccount: %v", err)
+			return nil
+		}
+		reqs := make([]reconcile.Request, 0)
+		saName := o.GetName()
+		for _, pg := range pgList.Items {
+			if saName == authAPIServerProxySAName && isAuthAPIServerProxy(&pg) {
+				reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)})
+			}
+			expectedOwner := pgOwnerReference(&pg)[0]
+			saOwnerRefs := o.GetOwnerReferences()
+			for _, ref := range saOwnerRefs {
+				if apiequality.Semantic.DeepEqual(ref, expectedOwner) {
+					reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)})
+					break
+				}
+			}
+		}
+		return reqs
+	}
+}
+
 // serviceHandlerForIngress returns a handler for Service events for ingress
 // reconciler that ensures that if the Service associated with an event is of
 // interest to the reconciler, the associated Ingress(es) gets be reconciled.
cmd/k8s-operator/proxy.go (new file, 61 lines)
@@ -0,0 +1,61 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+type apiServerProxyMode int
+
+func (a apiServerProxyMode) String() string {
+	switch a {
+	case apiServerProxyModeDisabled:
+		return "disabled"
+	case apiServerProxyModeEnabled:
+		return "auth"
+	case apiServerProxyModeNoAuth:
+		return "noauth"
+	default:
+		return "unknown"
+	}
+}
+
+const (
+	apiServerProxyModeDisabled apiServerProxyMode = iota
+	apiServerProxyModeEnabled
+	apiServerProxyModeNoAuth
+)
+
+func parseAPIProxyMode() apiServerProxyMode {
+	haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != ""
+	haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != ""
+	switch {
+	case haveAPIProxyEnv && haveAuthProxyEnv:
+		log.Fatal("AUTH_PROXY (deprecated) and APISERVER_PROXY are mutually exclusive, please unset AUTH_PROXY")
+	case haveAuthProxyEnv:
+		var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated
+		if authProxyEnv {
+			return apiServerProxyModeEnabled
+		}
+		return apiServerProxyModeDisabled
+	case haveAPIProxyEnv:
+		var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth"
+		switch apiProxyEnv {
+		case "true":
+			return apiServerProxyModeEnabled
+		case "false", "":
+			return apiServerProxyModeDisabled
+		case "noauth":
+			return apiServerProxyModeNoAuth
+		default:
+			panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv))
+		}
+	}
+	return apiServerProxyModeDisabled
+}
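For illustration only, how the environment variables map to modes under
parseAPIProxyMode above. demoAPIProxyMode is a hypothetical helper in the same
package, not part of this commit; output follows the String method:

    func demoAPIProxyMode() {
        os.Setenv("APISERVER_PROXY", "true")
        fmt.Println(parseAPIProxyMode()) // auth
        os.Setenv("APISERVER_PROXY", "noauth")
        fmt.Println(parseAPIProxyMode()) // noauth
        os.Unsetenv("APISERVER_PROXY")
        fmt.Println(parseAPIProxyMode()) // disabled (assuming AUTH_PROXY is also unset)
    }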
@@ -17,6 +17,7 @@ import (
 	"strings"
 	"sync"

+	dockerref "github.com/distribution/reference"
 	"go.uber.org/zap"
 	xslices "golang.org/x/exp/slices"
 	appsv1 "k8s.io/api/apps/v1"
@@ -36,9 +37,11 @@ import (
 	tsoperator "tailscale.com/k8s-operator"
 	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
 	"tailscale.com/kube/egressservices"
+	"tailscale.com/kube/k8s-proxy/conf"
 	"tailscale.com/kube/kubetypes"
 	"tailscale.com/tailcfg"
 	"tailscale.com/tstime"
+	"tailscale.com/types/opt"
 	"tailscale.com/types/ptr"
 	"tailscale.com/util/clientmetric"
 	"tailscale.com/util/mak"
@@ -48,7 +51,9 @@ import (
 const (
 	reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed"
 	reasonProxyGroupReady          = "ProxyGroupReady"
+	reasonProxyGroupAvailable      = "ProxyGroupAvailable"
 	reasonProxyGroupCreating       = "ProxyGroupCreating"
 	reasonProxyGroupInvalid        = "ProxyGroupInvalid"

 	// Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c
 	optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
@@ -63,12 +68,14 @@ const (
 	//
 	// tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was
 	// first introduced.
 	pgMinCapabilityVersion = 106
+
+	kubeAPIServerConfigFile = "config.hujson"
 )

 var (
-	gaugeEgressProxyGroupResources  = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount)
-	gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount)
+	gaugeEgressProxyGroupResources    = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount)
+	gaugeIngressProxyGroupResources   = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount)
+	gaugeAPIServerProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupAPIServerCount)
 )

 // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition.
@@ -81,15 +88,17 @@ type ProxyGroupReconciler struct {

 	// User-specified defaults from the helm installation.
 	tsNamespace       string
-	proxyImage        string
+	tsProxyImage      string
+	k8sProxyImage     string
 	defaultTags       []string
 	tsFirewallMode    string
 	defaultProxyClass string
 	loginServer       string

-	mu                 sync.Mutex           // protects following
-	egressProxyGroups  set.Slice[types.UID] // for egress proxygroups gauge
-	ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge
+	mu                   sync.Mutex           // protects following
+	egressProxyGroups    set.Slice[types.UID] // for egress proxygroups gauge
+	ingressProxyGroups   set.Slice[types.UID] // for ingress proxygroups gauge
+	apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge
 }

 func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger {
@@ -170,7 +179,6 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG
 		if err != nil {
 			return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err)
 		}
-		validateProxyClassForPG(logger, pg, proxyClass)
 		if !tsoperator.ProxyClassIsReady(proxyClass) {
 			msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName)
 			logger.Info(msg)
@@ -178,6 +186,10 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG
 		}
 	}

+	if err := r.validate(ctx, pg, proxyClass, logger); err != nil {
+		return r.notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err))
+	}
+
 	staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass)
 	if err != nil {
 		if strings.Contains(err.Error(), optimisticLockErrorMsg) {
@@ -192,11 +204,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG
 	return staticEndpoints, nrr, nil
 }

-// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup.
-func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) {
-	if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
-		return
-	}
-
+func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) error {
 	// Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check
 	// beig accessible on the replica Pod IP:9002. This address can also be modified by users, via
 	// TS_LOCAL_ADDR_PORT env var.
@@ -208,13 +216,70 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc
 	// shouldn't need to set their own).
 	//
 	// TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later).
-	if hasLocalAddrPortSet(pc) {
+	if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && hasLocalAddrPortSet(pc) {
 		msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value."+
 			"This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted."+
 			"In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups."+
 			"Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name)
 		logger.Warn(msg)
 	}
+
+	// image is the value of pc.Spec.StatefulSet.Pod.TailscaleContainer.Image or "";
+	// imagePath is a slash-delimited path ending with the image name, e.g.
+	// "tailscale/tailscale" or maybe "k8s-proxy" if hosted at example.com/k8s-proxy.
+	var image, imagePath string
+	if pc != nil &&
+		pc.Spec.StatefulSet != nil &&
+		pc.Spec.StatefulSet.Pod != nil &&
+		pc.Spec.StatefulSet.Pod.TailscaleContainer != nil &&
+		pc.Spec.StatefulSet.Pod.TailscaleContainer.Image != "" {
+		image = pc.Spec.StatefulSet.Pod.TailscaleContainer.Image
+		parsed, err := dockerref.ParseNormalizedNamed(image)
+		if err != nil {
+			// Shouldn't be possible as the ProxyClass won't be marked ready
+			// without successfully parsing the image.
+			return fmt.Errorf("error parsing %q as a container image reference: %w", image, err)
+		}
+		imagePath = dockerref.Path(parsed)
+	}
+
+	var errs []error
+	if isAuthAPIServerProxy(pg) {
+		// Validate that the static ServiceAccount already exists.
+		sa := &corev1.ServiceAccount{}
+		if err := r.Get(ctx, types.NamespacedName{Namespace: r.tsNamespace, Name: authAPIServerProxySAName}, sa); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return fmt.Errorf("error validating that ServiceAccount %q exists: %w", authAPIServerProxySAName, err)
+			}
+			errs = append(errs, fmt.Errorf("the ServiceAccount %q used for the API server proxy in auth mode does not exist but "+
+				"should have been created during operator installation; use apiServerProxyConfig.allowImpersonation=true "+
+				"in the helm chart, or authproxy-rbac.yaml from the static manifests", authAPIServerProxySAName))
+		}
+	} else {
+		// Validate that the ServiceAccount we create won't overwrite the static one.
+		// TODO(tomhjp): This doesn't cover other controllers that could create a
+		// ServiceAccount. Perhaps should have some guards to ensure that an update
+		// would never change the ownership of a resource we expect to already be owned.
+		if pgServiceAccountName(pg) == authAPIServerProxySAName {
+			errs = append(errs, fmt.Errorf("the name of the ProxyGroup %q conflicts with the static ServiceAccount used for the API server proxy in auth mode", pg.Name))
+		}
+	}
+
+	if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+		if strings.HasSuffix(imagePath, "tailscale") {
+			errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "k8s-proxy", pg.Spec.Type))
+		}
+
+		if pc != nil && pc.Spec.StatefulSet != nil && pc.Spec.StatefulSet.Pod != nil && pc.Spec.StatefulSet.Pod.TailscaleInitContainer != nil {
+			errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies Tailscale init container config, but ProxyGroups of type %q do not use init containers", pc.Name, pg.Spec.Type))
+		}
+	} else {
+		if strings.HasSuffix(imagePath, "k8s-proxy") {
+			errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "tailscale", pg.Spec.Type))
+		}
+	}
+
+	return errors.Join(errs...)
 }

 func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) {
@@ -263,14 +328,21 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
 			return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err)
 		}
 	}
-	sa := pgServiceAccount(pg, r.tsNamespace)
-	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) {
-		s.ObjectMeta.Labels = sa.ObjectMeta.Labels
-		s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations
-		s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences
-	}); err != nil {
-		return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err)
+
+	// auth mode kube-apiserver ProxyGroups use a statically created
+	// ServiceAccount to keep ClusterRole creation permissions limited to the
+	// helm chart installer.
+	if !isAuthAPIServerProxy(pg) {
+		sa := pgServiceAccount(pg, r.tsNamespace)
+		if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) {
+			s.ObjectMeta.Labels = sa.ObjectMeta.Labels
+			s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations
+			s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences
+		}); err != nil {
+			return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err)
+		}
 	}
+
 	role := pgRole(pg, r.tsNamespace)
 	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
 		r.ObjectMeta.Labels = role.ObjectMeta.Labels
@@ -280,6 +352,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
 	}); err != nil {
 		return r.notReadyErrf(pg, "error provisioning Role: %w", err)
 	}
+
 	roleBinding := pgRoleBinding(pg, r.tsNamespace)
 	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) {
 		r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
@@ -290,6 +363,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
 	}); err != nil {
 		return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err)
 	}
+
 	if pg.Spec.Type == tsapi.ProxyGroupTypeEgress {
 		cm, hp := pgEgressCM(pg, r.tsNamespace)
 		if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
@@ -300,6 +374,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
 			return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err)
 		}
 	}
+
 	if pg.Spec.Type == tsapi.ProxyGroupTypeIngress {
 		cm := pgIngressCM(pg, r.tsNamespace)
 		if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) {
@@ -309,7 +384,12 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
 			return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err)
 		}
 	}
-	ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass)
+
+	defaultImage := r.tsProxyImage
+	if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+		defaultImage = r.k8sProxyImage
+	}
+	ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass)
 	if err != nil {
 		return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err)
 	}
@@ -371,7 +451,7 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za
 	if len(devices) > 0 {
 		status = metav1.ConditionTrue
 		if len(devices) == desiredReplicas {
-			reason = reasonProxyGroupReady
+			reason = reasonProxyGroupAvailable
 		}
 	}
 	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger)
@@ -702,17 +782,57 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
 			return nil, err
 		}

-		configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer)
-		if err != nil {
-			return nil, fmt.Errorf("error creating tailscaled config: %w", err)
-		}
+		if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+			hostname := pgHostname(pg, i)

-		for cap, cfg := range configs {
-			cfgJSON, err := json.Marshal(cfg)
-			if err != nil {
-				return nil, fmt.Errorf("error marshalling tailscaled config: %w", err)
+			if authKey == nil && existingCfgSecret != nil {
+				deviceAuthed := false
+				for _, d := range pg.Status.Devices {
+					if d.Hostname == hostname {
+						deviceAuthed = true
+						break
+					}
+				}
+				if !deviceAuthed {
+					existingCfg := conf.ConfigV1Alpha1{}
+					if err := json.Unmarshal(existingCfgSecret.Data[kubeAPIServerConfigFile], &existingCfg); err != nil {
+						return nil, fmt.Errorf("error unmarshalling existing config: %w", err)
+					}
+					if existingCfg.AuthKey != nil {
+						authKey = existingCfg.AuthKey
+					}
+				}
+			}
+
+			cfg := conf.VersionedConfig{
+				Version: "v1alpha1",
+				ConfigV1Alpha1: &conf.ConfigV1Alpha1{
+					Hostname: &hostname,
+					State:    ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))),
+					App:      ptr.To(kubetypes.AppProxyGroupKubeAPIServer),
+					AuthKey:  authKey,
+					KubeAPIServer: &conf.KubeAPIServer{
+						AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)),
+					},
+				},
+			}
+			cfgB, err := json.Marshal(cfg)
+			if err != nil {
+				return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err)
+			}
+			mak.Set(&cfgSecret.Data, kubeAPIServerConfigFile, cfgB)
+		} else {
+			configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer)
+			if err != nil {
+				return nil, fmt.Errorf("error creating tailscaled config: %w", err)
+			}
+
+			for cap, cfg := range configs {
+				cfgJSON, err := json.Marshal(cfg)
+				if err != nil {
+					return nil, fmt.Errorf("error marshalling tailscaled config: %w", err)
+				}
+				mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON)
 			}
-			mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON)
 		}

 		if existingCfgSecret != nil {
@@ -834,9 +954,12 @@ func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGr
 		r.egressProxyGroups.Add(pg.UID)
 	case tsapi.ProxyGroupTypeIngress:
 		r.ingressProxyGroups.Add(pg.UID)
+	case tsapi.ProxyGroupTypeKubernetesAPIServer:
+		r.apiServerProxyGroups.Add(pg.UID)
 	}
 	gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len()))
 	gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len()))
+	gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len()))
 }

 // ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the
@@ -847,9 +970,12 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro
 		r.egressProxyGroups.Remove(pg.UID)
 	case tsapi.ProxyGroupTypeIngress:
 		r.ingressProxyGroups.Remove(pg.UID)
+	case tsapi.ProxyGroupTypeKubernetesAPIServer:
+		r.apiServerProxyGroups.Remove(pg.UID)
 	}
 	gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len()))
 	gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len()))
+	gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len()))
 }

 func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer string) (tailscaledConfigs, error) {
@@ -858,7 +984,7 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a
 		AcceptDNS:         "false",
 		AcceptRoutes:      "false", // AcceptRoutes defaults to true
 		Locked:            "false",
-		Hostname:          ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)),
+		Hostname:          ptr.To(pgHostname(pg, idx)),
 		AdvertiseServices: oldAdvertiseServices,
 		AuthKey:           authKey,
 	}
@@ -867,10 +993,6 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a
 		conf.ServerURL = &loginServer
 	}

-	if pg.Spec.HostnamePrefix != "" {
-		conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx))
-	}
-
 	if shouldAcceptRoutes(pc) {
 		conf.AcceptRoutes = "true"
 	}
@@ -7,6 +7,7 @@ package main

 import (
 	"fmt"
+	"path/filepath"
 	"slices"
 	"strconv"
 	"strings"
@@ -28,6 +29,9 @@ const (
 	// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully.
 	deletionGracePeriodSeconds int64 = 360
 	staticEndpointPortName           = "static-endpoint-port"
+	// authAPIServerProxySAName is the ServiceAccount deployed by the helm chart
+	// if apiServerProxy.authEnabled is true.
+	authAPIServerProxySAName = "kube-apiserver-auth-proxy"
 )

 func pgNodePortServiceName(proxyGroupName string, replica int32) string {
@@ -61,6 +65,9 @@ func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *cor
 // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be
 // applied over the top after.
 func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) {
+	if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer {
+		return kubeAPIServerStatefulSet(pg, namespace, image)
+	}
 	ss := new(appsv1.StatefulSet)
 	if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
 		return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
@@ -167,6 +174,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
 			Value: "$(POD_NAME)",
 		},
 		{
+			// TODO(tomhjp): This is tsrecorder-specific and does nothing. Delete.
 			Name:  "TS_STATE",
 			Value: "kube:$(POD_NAME)",
 		},
@@ -264,9 +272,124 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string
 		// gracefully.
 		ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds)
 	}
+
 	return ss, nil
 }

+func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) {
+	sts := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            pg.Name,
+			Namespace:       namespace,
+			Labels:          pgLabels(pg.Name, nil),
+			OwnerReferences: pgOwnerReference(pg),
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Replicas: ptr.To(pgReplicas(pg)),
+			Selector: &metav1.LabelSelector{
+				MatchLabels: pgLabels(pg.Name, nil),
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:                       pg.Name,
+					Namespace:                  namespace,
+					Labels:                     pgLabels(pg.Name, nil),
+					DeletionGracePeriodSeconds: ptr.To[int64](10),
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: pgServiceAccountName(pg),
+					Containers: []corev1.Container{
+						{
+							Name:  mainContainerName,
+							Image: image,
+							Env: []corev1.EnvVar{
+								{
+									// Used as default hostname and in Secret names.
+									Name: "POD_NAME",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "metadata.name",
+										},
+									},
+								},
+								{
+									// Used by kubeclient to post Events about the Pod's lifecycle.
+									Name: "POD_UID",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "metadata.uid",
+										},
+									},
+								},
+								{
+									// Used in an interpolated env var if metrics enabled.
+									Name: "POD_IP",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "status.podIP",
+										},
+									},
+								},
+								{
+									// Included for completeness with POD_IP and easier backwards compatibility in future.
+									Name: "POD_IPS",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "status.podIPs",
+										},
+									},
+								},
+								{
+									Name:  "TS_K8S_PROXY_CONFIG",
+									Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile),
+								},
+							},
+							VolumeMounts: func() []corev1.VolumeMount {
+								var mounts []corev1.VolumeMount
+
+								// TODO(tomhjp): Read config directly from the Secret instead.
+								for i := range pgReplicas(pg) {
+									mounts = append(mounts, corev1.VolumeMount{
+										Name:      fmt.Sprintf("k8s-proxy-config-%d", i),
+										ReadOnly:  true,
+										MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i),
+									})
+								}
+
+								return mounts
+							}(),
+							Ports: []corev1.ContainerPort{
+								{
+									Name:          "k8s-proxy",
+									ContainerPort: 443,
+									Protocol:      corev1.ProtocolTCP,
+								},
+							},
+						},
+					},
+					Volumes: func() []corev1.Volume {
+						var volumes []corev1.Volume
+						for i := range pgReplicas(pg) {
+							volumes = append(volumes, corev1.Volume{
+								Name: fmt.Sprintf("k8s-proxy-config-%d", i),
+								VolumeSource: corev1.VolumeSource{
+									Secret: &corev1.SecretVolumeSource{
+										SecretName: pgConfigSecretName(pg.Name, i),
+									},
+								},
+							})
+						}
+
+						return volumes
+					}(),
+				},
+			},
+		},
+	}
+
+	return sts, nil
+}
+
 func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount {
 	return &corev1.ServiceAccount{
 		ObjectMeta: metav1.ObjectMeta{
@@ -305,8 +428,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role {
 			ResourceNames: func() (secrets []string) {
 				for i := range pgReplicas(pg) {
 					secrets = append(secrets,
-						pgConfigSecretName(pg.Name, i),   // Config with auth key.
-						fmt.Sprintf("%s-%d", pg.Name, i), // State.
+						pgConfigSecretName(pg.Name, i), // Config with auth key.
+						pgPodName(pg.Name, i),          // State.
 					)
 				}
 				return secrets
@@ -336,7 +459,7 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding {
 		Subjects: []rbacv1.Subject{
 			{
 				Kind:      "ServiceAccount",
-				Name:      pg.Name,
+				Name:      pgServiceAccountName(pg),
 				Namespace: namespace,
 			},
 		},
@@ -347,6 +470,27 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding {
 	}
 }

+// kube-apiserver proxies in auth mode use a static ServiceAccount. Everything
+// else uses a per-ProxyGroup ServiceAccount.
+func pgServiceAccountName(pg *tsapi.ProxyGroup) string {
+	if isAuthAPIServerProxy(pg) {
+		return authAPIServerProxySAName
+	}
+
+	return pg.Name
+}
+
+func isAuthAPIServerProxy(pg *tsapi.ProxyGroup) bool {
+	if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer {
+		return false
+	}
+
+	// The default is auth mode.
+	return pg.Spec.KubeAPIServer == nil ||
+		pg.Spec.KubeAPIServer.Mode == nil ||
+		*pg.Spec.KubeAPIServer.Mode == tsapi.APIServerProxyModeAuth
+}
+
 func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) {
 	for i := range pgReplicas(pg) {
 		secrets = append(secrets, &corev1.Secret{
@@ -418,6 +562,18 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 {
 	return 2
 }

+func pgPodName(pgName string, i int32) string {
+	return fmt.Sprintf("%s-%d", pgName, i)
+}
+
+func pgHostname(pg *tsapi.ProxyGroup, i int32) string {
+	if pg.Spec.HostnamePrefix != "" {
+		return fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, i)
+	}
+
+	return fmt.Sprintf("%s-%d", pg.Name, i)
+}
+
 func pgConfigSecretName(pgName string, i int32) string {
 	return fmt.Sprintf("%s-%d-config", pgName, i)
 }
@@ -629,7 +629,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) {

 	reconciler := &ProxyGroupReconciler{
 		tsNamespace:       tsNamespace,
-		proxyImage:        testProxyImage,
+		tsProxyImage:      testProxyImage,
 		defaultTags:       []string{"tag:test-tag"},
 		tsFirewallMode:    "auto",
 		defaultProxyClass: "default-pc",
@@ -772,7 +772,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) {
 	t.Run("delete_and_cleanup", func(t *testing.T) {
 		reconciler := &ProxyGroupReconciler{
 			tsNamespace:       tsNamespace,
-			proxyImage:        testProxyImage,
+			tsProxyImage:      testProxyImage,
 			defaultTags:       []string{"tag:test-tag"},
 			tsFirewallMode:    "auto",
 			defaultProxyClass: "default-pc",
@@ -832,7 +832,7 @@ func TestProxyGroup(t *testing.T) {
 	cl := tstest.NewClock(tstest.ClockOpts{})
 	reconciler := &ProxyGroupReconciler{
 		tsNamespace:       tsNamespace,
-		proxyImage:        testProxyImage,
+		tsProxyImage:      testProxyImage,
 		defaultTags:       []string{"tag:test-tag"},
 		tsFirewallMode:    "auto",
 		defaultProxyClass: "default-pc",
@@ -915,7 +915,7 @@ func TestProxyGroup(t *testing.T) {
 			},
 		}
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
-		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar())
+		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar())
 		expectEqual(t, fc, pg)
 		expectProxyGroupResources(t, fc, pg, true, pc)
 	})
@@ -934,7 +934,7 @@ func TestProxyGroup(t *testing.T) {
|
||||
addNodeIDToStateSecrets(t, fc, pg)
|
||||
expectReconciled(t, reconciler, "", pg.Name)
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar())
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar())
|
||||
pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{
|
||||
Hostname: "hostname-nodeid-2",
|
||||
TailnetIPs: []string{"1.2.3.4", "::1"},
|
||||
@@ -952,7 +952,7 @@ func TestProxyGroup(t *testing.T) {
|
||||
expectReconciled(t, reconciler, "", pg.Name)
|
||||
|
||||
pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device.
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar())
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar())
|
||||
expectEqual(t, fc, pg)
|
||||
expectProxyGroupResources(t, fc, pg, true, pc)
|
||||
})
@@ -1025,12 +1025,12 @@ func TestProxyGroupTypes(t *testing.T) {

	zl, _ := zap.NewDevelopment()
	reconciler := &ProxyGroupReconciler{
		tsNamespace: tsNamespace,
		proxyImage:  testProxyImage,
		Client:      fc,
		l:           zl.Sugar(),
		tsClient:    &fakeTSClient{},
		clock:       tstest.NewClock(tstest.ClockOpts{}),
		tsNamespace:  tsNamespace,
		tsProxyImage: testProxyImage,
		Client:       fc,
		l:            zl.Sugar(),
		tsClient:     &fakeTSClient{},
		clock:        tstest.NewClock(tstest.ClockOpts{}),
	}

	t.Run("egress_type", func(t *testing.T) {
@@ -1047,7 +1047,7 @@ func TestProxyGroupTypes(t *testing.T) {
		mustCreate(t, fc, pg)

		expectReconciled(t, reconciler, "", pg.Name)
		verifyProxyGroupCounts(t, reconciler, 0, 1)
		verifyProxyGroupCounts(t, reconciler, 0, 1, 0)

		sts := &appsv1.StatefulSet{}
		if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
@@ -1161,7 +1161,7 @@ func TestProxyGroupTypes(t *testing.T) {
		}

		expectReconciled(t, reconciler, "", pg.Name)
		verifyProxyGroupCounts(t, reconciler, 1, 2)
		verifyProxyGroupCounts(t, reconciler, 1, 2, 0)

		sts := &appsv1.StatefulSet{}
		if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
@@ -1198,6 +1198,44 @@ func TestProxyGroupTypes(t *testing.T) {
			t.Errorf("unexpected volume mounts (-want +got):\n%s", diff)
		}
	})

	t.Run("kubernetes_api_server_type", func(t *testing.T) {
		pg := &tsapi.ProxyGroup{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test-k8s-apiserver",
				UID:  "test-k8s-apiserver-uid",
			},
			Spec: tsapi.ProxyGroupSpec{
				Type:     tsapi.ProxyGroupTypeKubernetesAPIServer,
				Replicas: ptr.To[int32](2),
				KubeAPIServer: &tsapi.KubeAPIServerConfig{
					Mode: ptr.To(tsapi.APIServerProxyModeNoAuth),
				},
			},
		}
		if err := fc.Create(t.Context(), pg); err != nil {
			t.Fatal(err)
		}

		expectReconciled(t, reconciler, "", pg.Name)
		verifyProxyGroupCounts(t, reconciler, 1, 2, 1)

		sts := &appsv1.StatefulSet{}
		if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil {
			t.Fatalf("failed to get StatefulSet: %v", err)
		}

		// Verify the StatefulSet configuration for KubernetesAPIServer type.
		if sts.Spec.Template.Spec.Containers[0].Name != mainContainerName {
			t.Errorf("unexpected container name %s, want %s", sts.Spec.Template.Spec.Containers[0].Name, mainContainerName)
		}
		if sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort != 443 {
			t.Errorf("unexpected container port %d, want 443", sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort)
		}
		if sts.Spec.Template.Spec.Containers[0].Ports[0].Name != "k8s-proxy" {
			t.Errorf("unexpected port name %s, want k8s-proxy", sts.Spec.Template.Spec.Containers[0].Ports[0].Name)
		}
	})
}

func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
@@ -1206,12 +1244,12 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
		WithStatusSubresource(&tsapi.ProxyGroup{}).
		Build()
	reconciler := &ProxyGroupReconciler{
		tsNamespace: tsNamespace,
		proxyImage:  testProxyImage,
		Client:      fc,
		l:           zap.Must(zap.NewDevelopment()).Sugar(),
		tsClient:    &fakeTSClient{},
		clock:       tstest.NewClock(tstest.ClockOpts{}),
		tsNamespace:  tsNamespace,
		tsProxyImage: testProxyImage,
		Client:       fc,
		l:            zap.Must(zap.NewDevelopment()).Sugar(),
		tsClient:     &fakeTSClient{},
		clock:        tstest.NewClock(tstest.ClockOpts{}),
	}

	existingServices := []string{"svc1", "svc2"}
@@ -1272,6 +1310,170 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
	})
}

func TestValidateProxyGroup(t *testing.T) {
	type testCase struct {
		typ            tsapi.ProxyGroupType
		pgName         string
		image          string
		noauth         bool
		initContainer  bool
		staticSAExists bool
		expectedErrs   int
	}

	for name, tc := range map[string]testCase{
		"default_ingress": {
			typ: tsapi.ProxyGroupTypeIngress,
		},
		"default_kube": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
		},
		"default_kube_noauth": {
			typ:    tsapi.ProxyGroupTypeKubernetesAPIServer,
			noauth: true,
			// Does not require the static ServiceAccount to exist.
		},
		"kube_static_sa_missing": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: false,
			expectedErrs:   1,
		},
		"kube_noauth_would_overwrite_static_sa": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			noauth:         true,
			pgName:         authAPIServerProxySAName,
			expectedErrs:   1,
		},
		"ingress_would_overwrite_static_sa": {
			typ:            tsapi.ProxyGroupTypeIngress,
			staticSAExists: true,
			pgName:         authAPIServerProxySAName,
			expectedErrs:   1,
		},
		"tailscale_image_for_kube_pg_1": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			image:          "example.com/tailscale/tailscale",
			expectedErrs:   1,
		},
		"tailscale_image_for_kube_pg_2": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			image:          "example.com/tailscale",
			expectedErrs:   1,
		},
		"tailscale_image_for_kube_pg_3": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			image:          "example.com/tailscale/tailscale:latest",
			expectedErrs:   1,
		},
		"tailscale_image_for_kube_pg_4": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			image:          "tailscale/tailscale",
			expectedErrs:   1,
		},
		"k8s_proxy_image_for_ingress_pg": {
			typ:          tsapi.ProxyGroupTypeIngress,
			image:        "example.com/k8s-proxy",
			expectedErrs: 1,
		},
		"init_container_for_kube_pg": {
			typ:            tsapi.ProxyGroupTypeKubernetesAPIServer,
			staticSAExists: true,
			initContainer:  true,
			expectedErrs:   1,
		},
		"init_container_for_ingress_pg": {
			typ:           tsapi.ProxyGroupTypeIngress,
			initContainer: true,
		},
		"init_container_for_egress_pg": {
			typ:           tsapi.ProxyGroupTypeEgress,
			initContainer: true,
		},
	} {
		t.Run(name, func(t *testing.T) {
			pc := &tsapi.ProxyClass{
				ObjectMeta: metav1.ObjectMeta{
					Name: "some-pc",
				},
				Spec: tsapi.ProxyClassSpec{
					StatefulSet: &tsapi.StatefulSet{
						Pod: &tsapi.Pod{},
					},
				},
			}
			if tc.image != "" {
				pc.Spec.StatefulSet.Pod.TailscaleContainer = &tsapi.Container{
					Image: tc.image,
				}
			}
			if tc.initContainer {
				pc.Spec.StatefulSet.Pod.TailscaleInitContainer = &tsapi.Container{}
			}
			pgName := "some-pg"
			if tc.pgName != "" {
				pgName = tc.pgName
			}
			pg := &tsapi.ProxyGroup{
				ObjectMeta: metav1.ObjectMeta{
					Name: pgName,
				},
				Spec: tsapi.ProxyGroupSpec{
					Type: tc.typ,
				},
			}
			if tc.noauth {
				pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{
					Mode: ptr.To(tsapi.APIServerProxyModeNoAuth),
				}
			}

			var objs []client.Object
			if tc.staticSAExists {
				objs = append(objs, &corev1.ServiceAccount{
					ObjectMeta: metav1.ObjectMeta{
						Name:      authAPIServerProxySAName,
						Namespace: tsNamespace,
					},
				})
			}
			r := ProxyGroupReconciler{
				tsNamespace: tsNamespace,
				Client: fake.NewClientBuilder().
					WithObjects(objs...).
					Build(),
			}

			logger, _ := zap.NewDevelopment()
			err := r.validate(t.Context(), pg, pc, logger.Sugar())
			if tc.expectedErrs == 0 {
				if err != nil {
					t.Fatalf("expected no errors, got: %v", err)
				}
				// Test finished.
				return
			}

			if err == nil {
				t.Fatalf("expected %d errors, got none", tc.expectedErrs)
			}

			type unwrapper interface {
				Unwrap() []error
			}
			errs := err.(unwrapper)
			if len(errs.Unwrap()) != tc.expectedErrs {
				t.Fatalf("expected %d errors, got %d: %v", tc.expectedErrs, len(errs.Unwrap()), err)
			}
		})
	}
}
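The unwrapper assertion above works because validate evidently aggregates its failures into a single error whose Unwrap() returns a slice, matching the shape produced by errors.Join (Go 1.20+). A standalone sketch of that mechanism:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		// errors.Join returns an error that implements Unwrap() []error,
		// which is exactly what the test's unwrapper interface asserts.
		err := errors.Join(errors.New("first"), errors.New("second"))
		if u, ok := err.(interface{ Unwrap() []error }); ok {
			fmt.Println(len(u.Unwrap())) // 2
		}
	}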

func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) {
	pcLEStaging := &tsapi.ProxyClass{
		ObjectMeta: metav1.ObjectMeta{
@@ -1326,7 +1528,7 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s
	return pc
}

func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) {
func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress, wantAPIServer int) {
	t.Helper()
	if r.ingressProxyGroups.Len() != wantIngress {
		t.Errorf("expected %d ingress proxy groups, got %d", wantIngress, r.ingressProxyGroups.Len())
@@ -1334,6 +1536,9 @@ func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress,
	if r.egressProxyGroups.Len() != wantEgress {
		t.Errorf("expected %d egress proxy groups, got %d", wantEgress, r.egressProxyGroups.Len())
	}
	if r.apiServerProxyGroups.Len() != wantAPIServer {
		t.Errorf("expected %d kube-apiserver proxy groups, got %d", wantAPIServer, r.apiServerProxyGroups.Len())
	}
}

func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) {
@@ -1512,7 +1717,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) {

			reconciler := &ProxyGroupReconciler{
				tsNamespace:       tsNamespace,
				proxyImage:        testProxyImage,
				tsProxyImage:      testProxyImage,
				defaultTags:       []string{"tag:test"},
				defaultProxyClass: tt.defaultProxyClass,
				Client:            fc,

@@ -102,6 +102,8 @@ const (
	defaultLocalAddrPort = 9002 // metrics and health check port

	letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory"

	mainContainerName = "tailscale"
)

var (
@@ -761,7 +763,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
	}
	if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) {
		for i, c := range ss.Spec.Template.Spec.Containers {
			if c.Name == "tailscale" {
			if isMainContainer(&c) {
				ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
					Name:  "TS_DEBUG_ACME_DIRECTORY_URL",
					Value: letsEncryptStagingEndpoint,
@@ -829,7 +831,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
		return base
	}
	for i, c := range ss.Spec.Template.Spec.Containers {
		if c.Name == "tailscale" {
		if isMainContainer(&c) {
			ss.Spec.Template.Spec.Containers[i] = updateContainer(wantsPod.TailscaleContainer, ss.Spec.Template.Spec.Containers[i])
			break
		}
@@ -847,7 +849,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,

func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) {
	for i, c := range ss.Spec.Template.Spec.Containers {
		if c.Name == "tailscale" {
		if isMainContainer(&c) {
			if debug {
				ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env,
					// Serve tailscaled's debug metrics on
@@ -902,6 +904,10 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) {
	}
}

func isMainContainer(c *corev1.Container) bool {
	return c.Name == mainContainerName
}

// tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy
// state and auth key and returns tailscaled config files for currently supported proxy versions.
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) {

@@ -60,7 +60,6 @@ type HAServiceReconciler struct {
	recorder    record.EventRecorder
	logger      *zap.SugaredLogger
	tsClient    tsClient
	tsnetServer tsnetServer
	tsNamespace string
	lc          localClient
	defaultTags []string
@@ -221,7 +220,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin
	// This checks and ensures that Tailscale Service's owner references are updated
	// for this Service and errors if that is not possible (i.e. because it
	// appears that the Tailscale Service has been created by a non-operator actor).
	updatedAnnotations, err := r.ownerAnnotations(existingTSSvc)
	updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc)
	if err != nil {
		instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotation", AnnotationHostname)
		msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr)
@@ -395,7 +394,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string,

	serviceName := tailcfg.ServiceName("svc:" + hostname)
	// 1. Clean up the Tailscale Service.
	svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger)
	svcChanged, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger)
	if err != nil {
		return false, fmt.Errorf("error deleting Tailscale Service: %w", err)
	}
@@ -456,7 +455,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG
		return false, fmt.Errorf("failed to update tailscaled config services: %w", err)
	}

	svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger)
	svcsChanged, err = cleanupTailscaleService(ctx, r.tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger)
	if err != nil {
		return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err)
	}
@@ -529,8 +528,8 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er
// If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference.
// If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing.
// It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred.
func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) {
	svc, err := r.tsClient.GetVIPService(ctx, name)
func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) {
	svc, err := tsClient.GetVIPService(ctx, name)
	if isErrorFeatureFlagNotEnabled(err) {
		msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled)
		logger.Warn(msg)
@@ -563,14 +562,14 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name
	// cluster before deleting the Ingress. Perhaps the comparison could be
	// 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'.
	ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool {
		return or.OperatorID == r.operatorID
		return or.OperatorID == operatorID
	})
	if ix == -1 {
		return false, nil
	}
	if len(o.OwnerRefs) == 1 {
		logger.Infof("Deleting Tailscale Service %q", name)
		return false, r.tsClient.DeleteVIPService(ctx, name)
		return false, tsClient.DeleteVIPService(ctx, name)
	}
	o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1)
	logger.Infof("Updating Tailscale Service %q", name)
@@ -579,7 +578,7 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name
		return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err)
	}
	svc.Annotations[ownerAnnotation] = string(json)
	return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc)
	return true, tsClient.CreateOrUpdateVIPService(ctx, svc)
}
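The refactor above turns cleanupTailscaleService from a method into a free function that takes the client and operator ID explicitly, so callers outside HAServiceReconciler can share it. A hypothetical in-package wrapper (not part of this commit) showing the new call shape:

	// cleanupByHostname shows that only a tsClient and an operator ID are
	// now needed to clean up a Tailscale Service by hostname.
	func cleanupByHostname(ctx context.Context, ts tsClient, operatorID, hostname string, logger *zap.SugaredLogger) (bool, error) {
		return cleanupTailscaleService(ctx, ts, tailcfg.ServiceName("svc:"+hostname), operatorID, logger)
	}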

func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) {
@@ -742,49 +741,6 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName
	return count, nil
}

// ownerAnnotations returns the updated annotations required to ensure this
// instance of the operator is included as an owner. If the Tailscale Service is not
// nil, but does not contain an owner we return an error as this likely means
// that the Tailscale Service was created by something other than a Tailscale
// Kubernetes operator.
func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) {
	ref := OwnerRef{
		OperatorID: r.operatorID,
	}
	if svc == nil {
		c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
		json, err := json.Marshal(c)
		if err != nil {
			return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err)
		}
		return map[string]string{
			ownerAnnotation: string(json),
		}, nil
	}
	o, err := parseOwnerAnnotation(svc)
	if err != nil {
		return nil, err
	}
	if o == nil || len(o.OwnerRefs) == 0 {
		return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name)
	}
	if slices.Contains(o.OwnerRefs, ref) { // up to date
		return svc.Annotations, nil
	}
	o.OwnerRefs = append(o.OwnerRefs, ref)
	json, err := json.Marshal(o)
	if err != nil {
		return nil, fmt.Errorf("error marshalling updated owner references: %w", err)
	}

	newAnnots := make(map[string]string, len(svc.Annotations)+1)
	for k, v := range svc.Annotations {
		newAnnots[k] = v
	}
	newAnnots[ownerAnnotation] = string(json)
	return newAnnots, nil
}
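For reference, the annotation value this (now relocated) ownerAnnotations helper builds for a fresh Tailscale Service is a small JSON document. A sketch assuming OwnerRef serializes OperatorID under its Go field name; the real JSON tags are not visible in this diff:

	// In-package sketch: the single-owner annotation value for a new Service.
	val := ownerAnnotationValue{OwnerRefs: []OwnerRef{{OperatorID: "op-1234"}}}
	b, _ := json.Marshal(val)
	// b is roughly {"OwnerRefs":[{"OperatorID":"op-1234"}]}, stored under the
	// ownerAnnotation key on the Tailscale Service.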

// dnsNameForService returns the DNS name for the given Tailscale Service name.
func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) {
	s := svc.WithoutPrefix()

@@ -187,7 +187,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
	if err := fc.Status().Update(context.Background(), pg); err != nil {
		t.Fatal(err)
	}
	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}

	ft := &fakeTSClient{}
	zl, err := zap.NewDevelopment()
@@ -210,7 +209,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
		clock:       cl,
		defaultTags: []string{"tag:k8s"},
		tsNamespace: "operator-ns",
		tsnetServer: fakeTsnetServer,
		logger:      zl.Sugar(),
		recorder:    record.NewFakeRecorder(10),
		lc:          lc,

197  cmd/k8s-proxy/k8s-proxy.go  Normal file
@@ -0,0 +1,197 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

// k8s-proxy proxies between tailnet and Kubernetes cluster traffic.
// Currently, it only supports proxying tailnet clients to the Kubernetes API
// server.
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/sync/errgroup"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"tailscale.com/hostinfo"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store"
	apiproxy "tailscale.com/k8s-operator/api-proxy"
	"tailscale.com/kube/k8s-proxy/conf"
	"tailscale.com/kube/state"
	"tailscale.com/tsnet"
)

func main() {
	logger := zap.Must(zap.NewProduction()).Sugar()
	defer logger.Sync()
	if err := run(logger); err != nil {
		logger.Fatal(err.Error())
	}
}

func run(logger *zap.SugaredLogger) error {
	var (
		configFile = os.Getenv("TS_K8S_PROXY_CONFIG")
		podUID     = os.Getenv("POD_UID")
	)
	if configFile == "" {
		return errors.New("TS_K8S_PROXY_CONFIG unset")
	}

	// TODO(tomhjp): Support reloading config.
	// TODO(tomhjp): Support reading config from a Secret.
	cfg, err := conf.Load(configFile)
	if err != nil {
		return fmt.Errorf("error loading config file %q: %w", configFile, err)
	}

	if cfg.Parsed.LogLevel != nil {
		level, err := zapcore.ParseLevel(*cfg.Parsed.LogLevel)
		if err != nil {
			return fmt.Errorf("error parsing log level %q: %w", *cfg.Parsed.LogLevel, err)
		}
		logger = logger.WithOptions(zap.IncreaseLevel(level))
	}

	if cfg.Parsed.App != nil {
		hostinfo.SetApp(*cfg.Parsed.App)
	}

	st, err := getStateStore(cfg.Parsed.State, logger)
	if err != nil {
		return err
	}

	// If Pod UID unset, assume we're running outside of a cluster/not managed
	// by the operator, so no need to set additional state keys.
	if podUID != "" {
		if err := state.SetInitialKeys(st, podUID); err != nil {
			return fmt.Errorf("error setting initial state: %w", err)
		}
	}

	var authKey string
	if cfg.Parsed.AuthKey != nil {
		authKey = *cfg.Parsed.AuthKey
	}

	ts := &tsnet.Server{
		Logf:     logger.Named("tsnet").Debugf,
		UserLogf: logger.Named("tsnet").Infof,
		Store:    st,
		AuthKey:  authKey,
	}
	if cfg.Parsed.Hostname != nil {
		ts.Hostname = *cfg.Parsed.Hostname
	}

	// ctx to live for the lifetime of the process.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	// Make sure we crash loop if Up doesn't complete in reasonable time.
	upCtx, upCancel := context.WithTimeout(ctx, time.Minute)
	defer upCancel()
	if _, err := ts.Up(upCtx); err != nil {
		return fmt.Errorf("error starting tailscale server: %w", err)
	}
	defer ts.Close()

	group, groupCtx := errgroup.WithContext(ctx)

	// Setup for updating state keys.
	if podUID != "" {
		lc, err := ts.LocalClient()
		if err != nil {
			return fmt.Errorf("error getting local client: %w", err)
		}
		w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap)
		if err != nil {
			return fmt.Errorf("error watching IPN bus: %w", err)
		}
		defer w.Close()

		group.Go(func() error {
			if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() {
				return fmt.Errorf("error keeping state keys updated: %w", err)
			}

			return nil
		})
	}

	// Setup for the API server proxy.
	restConfig, err := getRestConfig(logger)
	if err != nil {
		return fmt.Errorf("error getting rest config: %w", err)
	}
	authMode := true
	if cfg.Parsed.KubeAPIServer != nil {
		v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get()
		if ok {
			authMode = v
		}
	}
	ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode)
	if err != nil {
		return fmt.Errorf("error creating api server proxy: %w", err)
	}

	// TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not,
	// and possibly issue certs upfront here before serving.
	group.Go(func() error {
		if err := ap.Run(groupCtx); err != nil {
			return fmt.Errorf("error running API server proxy: %w", err)
		}

		return nil
	})

	return group.Wait()
}

func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) {
	p := "mem:"
	if path != nil {
		p = *path
	} else {
		logger.Warn("No state Secret provided; using in-memory store, which will lose state on restart")
	}
	st, err := store.New(logger.Errorf, p)
	if err != nil {
		return nil, fmt.Errorf("error creating state store: %w", err)
	}

	return st, nil
}

func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) {
	restConfig, err := rest.InClusterConfig()
	switch err {
	case nil:
		return restConfig, nil
	case rest.ErrNotInCluster:
		logger.Info("Not running in-cluster, falling back to kubeconfig")
	default:
		return nil, fmt.Errorf("error getting in-cluster config: %w", err)
	}

	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil)
	restConfig, err = clientConfig.ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("error loading kubeconfig: %w", err)
	}

	return restConfig, nil
}
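One subtlety in run above is the tri-state AuthMode: unset is distinct from false, and unset defaults to auth mode on. Judging by the Get() (value, ok) shape, the config likely uses tailscale.com/types/opt; a standalone sketch of that defaulting logic under that assumption:

	package main

	import (
		"fmt"

		"tailscale.com/types/opt"
	)

	// authModeEnabled mirrors the defaulting in run: an explicit value wins,
	// and an unset opt.Bool leaves auth mode on.
	func authModeEnabled(b opt.Bool) bool {
		if v, ok := b.Get(); ok {
			return v
		}
		return true
	}

	func main() {
		var unset opt.Bool
		fmt.Println(authModeEnabled(unset))              // true
		fmt.Println(authModeEnabled(opt.NewBool(false))) // false
	}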