cmd/k8s-operator,k8s-operator,go.mod: optionally create ServiceMonitor (#14248)
* cmd/k8s-operator,k8s-operator,go.mod: optionally create ServiceMonitor

Adds a new spec.metrics.serviceMonitor field to ProxyClass. If that's set to true (and metrics are enabled), the operator will create a Prometheus ServiceMonitor for each proxy to which the ProxyClass applies. Additionally, create a metrics Service for each proxy that has metrics enabled.

Updates tailscale/tailscale#11292

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
This commit is contained in:
parent eabb424275
commit 9f9063e624
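For illustration only (not part of this commit), a ProxyClass that opts into the new behaviour might look roughly like the sketch below; the resource name is hypothetical, while the spec.metrics.enable and spec.metrics.serviceMonitor.enable field paths follow the CRD schema added in this change, which also requires metrics to be enabled whenever the ServiceMonitor is enabled:

    # Hypothetical ProxyClass sketch; proxies reference it via the
    # tailscale.com/proxy-class label as in the tests below.
    apiVersion: tailscale.com/v1alpha1
    kind: ProxyClass
    metadata:
      name: metrics          # hypothetical name
    spec:
      metrics:
        enable: true         # ServiceMonitor can only be enabled if metrics are enabled
        serviceMonitor:
          enable: true       # new field added by this commit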
@@ -189,6 +189,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
 			isExitNode: cn.Spec.ExitNode,
 		},
 		ProxyClassName: proxyClass,
+		proxyType:      proxyTypeConnector,
 	}
 
 	if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
@@ -253,7 +254,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
 }
 
 func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) {
-	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector")); err != nil {
+	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil {
 		return false, fmt.Errorf("failed to cleanup Connector resources: %w", err)
 	} else if !done {
 		logger.Debugf("Connector cleanup not done yet, waiting for next reconcile")
@@ -378,7 +378,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
     k8s.io/api/storage/v1beta1 from k8s.io/client-go/applyconfigurations/storage/v1beta1+
     k8s.io/api/storagemigration/v1alpha1 from k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1+
     k8s.io/apiextensions-apiserver/pkg/apis/apiextensions from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
-💣  k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion
+💣  k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion+
     k8s.io/apimachinery/pkg/api/equality from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+
     k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+
     k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+
@@ -30,6 +30,10 @@ rules:
 - apiGroups: ["tailscale.com"]
   resources: ["recorders", "recorders/status"]
   verbs: ["get", "list", "watch", "update"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions"]
+  verbs: ["get", "list", "watch"]
+  resourceNames: ["servicemonitors.monitoring.coreos.com"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -65,6 +69,9 @@ rules:
 - apiGroups: ["rbac.authorization.k8s.io"]
   resources: ["roles", "rolebindings"]
   verbs: ["get", "create", "patch", "update", "list", "watch"]
+- apiGroups: ["monitoring.coreos.com"]
+  resources: ["servicemonitors"]
+  verbs: ["get", "list", "update", "create", "delete"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
@@ -74,6 +74,8 @@ spec:
                description: |-
                  Setting enable to true will make the proxy serve Tailscale metrics
                  at <pod-ip>:9002/metrics.
+                 A metrics Service named <proxy-statefulset>-metrics will also be created in the operator's namespace and will
+                 serve the metrics at <service-ip>:9002/metrics.
 
                  In 1.78.x and 1.80.x, this field also serves as the default value for
                  .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
@@ -81,6 +83,25 @@ spec:
 
                  Defaults to false.
                type: boolean
+            serviceMonitor:
+              description: |-
+                Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+                The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+                The ingested metrics for each Service monitor will have labels to identify the proxy:
+                ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+                ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+                ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+                job: ts_<proxy type>_[<parent namespace>]_<parent_name>
+              type: object
+              required:
+                - enable
+              properties:
+                enable:
+                  description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+                  type: boolean
+          x-kubernetes-validations:
+            - rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)'
+              message: ServiceMonitor can only be enabled if metrics are enabled
           statefulSet:
             description: |-
               Configuration parameters for the proxy's StatefulSet. Tailscale
@@ -541,6 +541,8 @@ spec:
                description: |-
                  Setting enable to true will make the proxy serve Tailscale metrics
                  at <pod-ip>:9002/metrics.
+                 A metrics Service named <proxy-statefulset>-metrics will also be created in the operator's namespace and will
+                 serve the metrics at <service-ip>:9002/metrics.
 
                  In 1.78.x and 1.80.x, this field also serves as the default value for
                  .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
@@ -548,9 +550,28 @@ spec:
 
                  Defaults to false.
                type: boolean
+            serviceMonitor:
+              description: |-
+                Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+                The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+                The ingested metrics for each Service monitor will have labels to identify the proxy:
+                ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+                ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+                ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+                job: ts_<proxy type>_[<parent namespace>]_<parent_name>
+              properties:
+                enable:
+                  description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+                  type: boolean
+              required:
+                - enable
+              type: object
           required:
             - enable
           type: object
+          x-kubernetes-validations:
+            - message: ServiceMonitor can only be enabled if metrics are enabled
+              rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)'
           statefulSet:
             description: |-
               Configuration parameters for the proxy's StatefulSet. Tailscale
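As an informal illustration of the label scheme described by this schema (the concrete values below are hypothetical and depend on the parent resource), a proxy created for a Tailscale Ingress named "test" in namespace "default" would have its ingested series carry labels along these lines:

    ts_proxy_type="ingress_resource"
    ts_proxy_parent_name="test"
    ts_proxy_parent_namespace="default"
    job="ts_ingress_resource_default_test"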
@@ -4648,6 +4669,16 @@ rules:
       - list
       - watch
       - update
+  - apiGroups:
+      - apiextensions.k8s.io
+    resourceNames:
+      - servicemonitors.monitoring.coreos.com
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - get
+      - list
+      - watch
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -4728,6 +4759,16 @@ rules:
       - update
       - list
       - watch
+  - apiGroups:
+      - monitoring.coreos.com
+    resources:
+      - servicemonitors
+    verbs:
+      - get
+      - list
+      - update
+      - create
+      - delete
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -90,7 +90,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
 		return nil
 	}
 
-	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress")); err != nil {
+	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil {
 		return fmt.Errorf("failed to cleanup: %w", err)
 	} else if !done {
 		logger.Debugf("cleanup not done yet, waiting for next reconcile")
@@ -268,6 +268,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 		Tags:                tags,
 		ChildResourceLabels: crl,
 		ProxyClassName:      proxyClass,
+		proxyType:           proxyTypeIngressResource,
 	}
 
 	if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
@@ -12,6 +12,7 @@
     appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     networkingv1 "k8s.io/api/networking/v1"
+    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -271,3 +272,124 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) {
 	opts.proxyClass = ""
 	expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation)
 }
+
+func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
+	pc := &tsapi.ProxyClass{
+		ObjectMeta: metav1.ObjectMeta{Name: "metrics", Generation: 1},
+		Spec: tsapi.ProxyClassSpec{
+			Metrics: &tsapi.Metrics{
+				Enable:         true,
+				ServiceMonitor: &tsapi.ServiceMonitor{Enable: true},
+			},
+		},
+		Status: tsapi.ProxyClassStatus{
+			Conditions: []metav1.Condition{{
+				Status:             metav1.ConditionTrue,
+				Type:               string(tsapi.ProxyClassReady),
+				ObservedGeneration: 1,
+			}}},
+	}
+	crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+	tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
+	fc := fake.NewClientBuilder().
+		WithScheme(tsapi.GlobalScheme).
+		WithObjects(pc, tsIngressClass).
+		WithStatusSubresource(pc).
+		Build()
+	ft := &fakeTSClient{}
+	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
+	zl, err := zap.NewDevelopment()
+	if err != nil {
+		t.Fatal(err)
+	}
+	ingR := &IngressReconciler{
+		Client: fc,
+		ssr: &tailscaleSTSReconciler{
+			Client:            fc,
+			tsClient:          ft,
+			tsnetServer:       fakeTsnetServer,
+			defaultTags:       []string{"tag:k8s"},
+			operatorNamespace: "operator-ns",
+			proxyImage:        "tailscale/tailscale",
+		},
+		logger: zl.Sugar(),
+	}
+	// 1. Enable metrics- expect metrics Service to be created
+	ing := &networkingv1.Ingress{
+		TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "default",
+			// The apiserver is supposed to set the UID, but the fake client
+			// doesn't. So, set it explicitly because other code later depends
+			// on it being set.
+			UID: types.UID("1234-UID"),
+			Labels: map[string]string{
+				"tailscale.com/proxy-class": "metrics",
+			},
+		},
+		Spec: networkingv1.IngressSpec{
+			IngressClassName: ptr.To("tailscale"),
+			DefaultBackend: &networkingv1.IngressBackend{
+				Service: &networkingv1.IngressServiceBackend{
+					Name: "test",
+					Port: networkingv1.ServiceBackendPort{
+						Number: 8080,
+					},
+				},
+			},
+			TLS: []networkingv1.IngressTLS{
+				{Hosts: []string{"default-test"}},
+			},
+		},
+	}
+	mustCreate(t, fc, ing)
+	mustCreate(t, fc, &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "default",
+		},
+		Spec: corev1.ServiceSpec{
+			ClusterIP: "1.2.3.4",
+			Ports: []corev1.ServicePort{{
+				Port: 8080,
+				Name: "http"},
+			},
+		},
+	})
+
+	expectReconciled(t, ingR, "default", "test")
+
+	fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
+	opts := configOpts{
+		stsName:            shortName,
+		secretName:         fullName,
+		namespace:          "default",
+		tailscaleNamespace: "operator-ns",
+		parentType:         "ingress",
+		hostname:           "default-test",
+		app:                kubetypes.AppIngressResource,
+		enableMetrics:      true,
+		namespaced:         true,
+		proxyType:          proxyTypeIngressResource,
+	}
+	serveConfig := &ipn.ServeConfig{
+		TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
+		Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
+	}
+	opts.serveConfig = serveConfig
+
+	expectEqual(t, fc, expectedSecret(t, fc, opts), nil)
+	expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil)
+	expectEqual(t, fc, expectedMetricsService(opts), nil)
+	expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation)
+	// 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster
+	mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) {
+		pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true}
+	})
+	expectReconciled(t, ingR, "default", "test")
+	// 3. Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created
+	mustCreate(t, fc, crd)
+	expectReconciled(t, ingR, "default", "test")
+	expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts))
+}
cmd/k8s-operator/metrics_resources.go — new file (272 lines)
@@ -0,0 +1,272 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+)
+
+const (
+	labelMetricsTarget = "tailscale.com/metrics-target"
+
+	// These labels get transferred from the metrics Service to the ingested Prometheus metrics.
+	labelPromProxyType            = "ts_proxy_type"
+	labelPromProxyParentName      = "ts_proxy_parent_name"
+	labelPromProxyParentNamespace = "ts_proxy_parent_namespace"
+	labelPromJob                  = "ts_prom_job"
+
+	serviceMonitorCRD = "servicemonitors.monitoring.coreos.com"
+)
+
+// ServiceMonitor contains a subset of fields of servicemonitors.monitoring.coreos.com Custom Resource Definition.
+// Duplicating it here allows us to avoid importing prometheus-operator library.
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L40
+type ServiceMonitor struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+	Spec              ServiceMonitorSpec `json:"spec"`
+}
+
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L55
+type ServiceMonitorSpec struct {
+	// Endpoints defines the endpoints to be scraped on the selected Service(s).
+	// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L82
+	Endpoints []ServiceMonitorEndpoint `json:"endpoints"`
+	// JobLabel is the label on the Service whose value will become the value of the Prometheus job label for the metrics ingested via this ServiceMonitor.
+	// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L66
+	JobLabel string `json:"jobLabel"`
+	// NamespaceSelector selects the namespace of Service(s) that this ServiceMonitor allows to scrape.
+	// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88
+	NamespaceSelector ServiceMonitorNamespaceSelector `json:"namespaceSelector,omitempty"`
+	// Selector is the label selector for Service(s) that this ServiceMonitor allows to scrape.
+	// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L85
+	Selector metav1.LabelSelector `json:"selector"`
+	// TargetLabels are labels on the selected Service that should be applied as Prometheus labels to the ingested metrics.
+	// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L72
+	TargetLabels []string `json:"targetLabels"`
+}
+
+// ServiceMonitorNamespaceSelector selects namespaces in which Prometheus operator will attempt to find Services for
+// this ServiceMonitor.
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88
+type ServiceMonitorNamespaceSelector struct {
+	MatchNames []string `json:"matchNames,omitempty"`
+}
+
+// ServiceMonitorEndpoint defines an endpoint of Service to scrape. We only define port here. Prometheus by default
+// scrapes /metrics path, which is what we want.
+type ServiceMonitorEndpoint struct {
+	// Port is the name of the Service port that Prometheus will scrape.
+	Port string `json:"port,omitempty"`
+}
+
+func reconcileMetricsResources(ctx context.Context, logger *zap.SugaredLogger, opts *metricsOpts, pc *tsapi.ProxyClass, cl client.Client) error {
+	if opts.proxyType == proxyTypeEgress {
+		// Metrics are currently not being enabled for standalone egress proxies.
+		return nil
+	}
+	if pc == nil || pc.Spec.Metrics == nil || !pc.Spec.Metrics.Enable {
+		return maybeCleanupMetricsResources(ctx, opts, cl)
+	}
+	metricsSvc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      metricsResourceName(opts.proxyStsName),
+			Namespace: opts.tsNamespace,
+			Labels:    metricsResourceLabels(opts),
+		},
+		Spec: corev1.ServiceSpec{
+			Selector: opts.proxyLabels,
+			Type:     corev1.ServiceTypeClusterIP,
+			Ports:    []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}},
+		},
+	}
+	var err error
+	metricsSvc, err = createOrUpdate(ctx, cl, opts.tsNamespace, metricsSvc, func(svc *corev1.Service) {
+		svc.Spec.Ports = metricsSvc.Spec.Ports
+		svc.Spec.Selector = metricsSvc.Spec.Selector
+	})
+	if err != nil {
+		return fmt.Errorf("error ensuring metrics Service: %w", err)
+	}
+
+	crdExists, err := hasServiceMonitorCRD(ctx, cl)
+	if err != nil {
+		return fmt.Errorf("error verifying that %q CRD exists: %w", serviceMonitorCRD, err)
+	}
+	if !crdExists {
+		return nil
+	}
+
+	if pc.Spec.Metrics.ServiceMonitor == nil || !pc.Spec.Metrics.ServiceMonitor.Enable {
+		return maybeCleanupServiceMonitor(ctx, cl, opts.proxyStsName, opts.tsNamespace)
+	}
+
+	logger.Info("ensuring ServiceMonitor for metrics Service %s/%s", metricsSvc.Namespace, metricsSvc.Name)
+	svcMonitor, err := newServiceMonitor(metricsSvc)
+	if err != nil {
+		return fmt.Errorf("error creating ServiceMonitor: %w", err)
+	}
+	// We don't use createOrUpdate here because that does not work with unstructured types. We also do not update
+	// the ServiceMonitor because it is not expected that any of its fields would change. Currently this is good
+	// enough, but in future we might want to add logic to create-or-update unstructured types.
+	err = cl.Get(ctx, client.ObjectKeyFromObject(metricsSvc), svcMonitor.DeepCopy())
+	if apierrors.IsNotFound(err) {
+		if err := cl.Create(ctx, svcMonitor); err != nil {
+			return fmt.Errorf("error creating ServiceMonitor: %w", err)
+		}
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("error getting ServiceMonitor: %w", err)
+	}
+	return nil
+}
+
+// maybeCleanupMetricsResources ensures that any metrics resources created for a proxy are deleted. Only metrics Service
+// gets deleted explicitly because the ServiceMonitor has Service's owner reference, so gets garbage collected
+// automatically.
+func maybeCleanupMetricsResources(ctx context.Context, opts *metricsOpts, cl client.Client) error {
+	sel := metricsSvcSelector(opts.proxyLabels, opts.proxyType)
+	return cl.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(opts.tsNamespace), client.MatchingLabels(sel))
+}
+
+// maybeCleanupServiceMonitor cleans up any ServiceMonitor created for the named proxy StatefulSet.
+func maybeCleanupServiceMonitor(ctx context.Context, cl client.Client, stsName, ns string) error {
+	smName := metricsResourceName(stsName)
+	sm := serviceMonitorTemplate(smName, ns)
+	u, err := serviceMonitorToUnstructured(sm)
+	if err != nil {
+		return fmt.Errorf("error building ServiceMonitor: %w", err)
+	}
+	err = cl.Get(ctx, types.NamespacedName{Name: smName, Namespace: ns}, u)
+	if apierrors.IsNotFound(err) {
+		return nil // nothing to do
+	}
+	if err != nil {
+		return fmt.Errorf("error verifying if ServiceMonitor %s/%s exists: %w", ns, stsName, err)
+	}
+	return cl.Delete(ctx, u)
+}
+
+// newServiceMonitor takes a metrics Service created for a proxy and constructs and returns a ServiceMonitor for that
+// proxy that can be applied to the kube API server.
+// The ServiceMonitor is returned as Unstructured type - this allows us to avoid importing prometheus-operator API server client/schema.
+func newServiceMonitor(metricsSvc *corev1.Service) (*unstructured.Unstructured, error) {
+	sm := serviceMonitorTemplate(metricsSvc.Name, metricsSvc.Namespace)
+	sm.ObjectMeta.Labels = metricsSvc.Labels
+	sm.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(metricsSvc, corev1.SchemeGroupVersion.WithKind("Service"))}
+	sm.Spec = ServiceMonitorSpec{
+		Selector: metav1.LabelSelector{MatchLabels: metricsSvc.Labels},
+		Endpoints: []ServiceMonitorEndpoint{{
+			Port: "metrics",
+		}},
+		NamespaceSelector: ServiceMonitorNamespaceSelector{
+			MatchNames: []string{metricsSvc.Namespace},
+		},
+		JobLabel: labelPromJob,
+		TargetLabels: []string{
+			labelPromProxyParentName,
+			labelPromProxyParentNamespace,
+			labelPromProxyType,
+		},
+	}
+	return serviceMonitorToUnstructured(sm)
+}
+
+// serviceMonitorToUnstructured takes a ServiceMonitor and converts it to Unstructured type that can be used by the c/r
+// client in Kubernetes API server calls.
+func serviceMonitorToUnstructured(sm *ServiceMonitor) (*unstructured.Unstructured, error) {
+	contents, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sm)
+	if err != nil {
+		return nil, fmt.Errorf("error converting ServiceMonitor to Unstructured: %w", err)
+	}
+	u := &unstructured.Unstructured{}
+	u.SetUnstructuredContent(contents)
+	u.SetGroupVersionKind(sm.GroupVersionKind())
+	return u, nil
+}
+
+// metricsResourceName returns name for metrics Service and ServiceMonitor for a proxy StatefulSet.
+func metricsResourceName(stsName string) string {
+	// Maximum length of StatefulSet name if 52 chars, so this is fine.
+	return fmt.Sprintf("%s-metrics", stsName)
+}
+
+// metricsResourceLabels constructs labels that will be applied to metrics Service and metrics ServiceMonitor for a
+// proxy.
+func metricsResourceLabels(opts *metricsOpts) map[string]string {
+	lbls := map[string]string{
+		LabelManaged:             "true",
+		labelMetricsTarget:       opts.proxyStsName,
+		labelPromProxyType:       opts.proxyType,
+		labelPromProxyParentName: opts.proxyLabels[LabelParentName],
+	}
+	// Include namespace label for proxies created for a namespaced type.
+	if isNamespacedProxyType(opts.proxyType) {
+		lbls[labelPromProxyParentNamespace] = opts.proxyLabels[LabelParentNamespace]
+	}
+	lbls[labelPromJob] = promJobName(opts)
+	return lbls
+}
+
+// promJobName constructs the value of the Prometheus job label that will apply to all metrics for a ServiceMonitor.
+func promJobName(opts *metricsOpts) string {
+	// Include parent resource namespace for proxies created for namespaced types.
+	if opts.proxyType == proxyTypeIngressResource || opts.proxyType == proxyTypeIngressService {
+		return fmt.Sprintf("ts_%s_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentNamespace], opts.proxyLabels[LabelParentName])
+	}
+	return fmt.Sprintf("ts_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentName])
+}
+
+// metricsSvcSelector returns the minimum label set to uniquely identify a metrics Service for a proxy.
+func metricsSvcSelector(proxyLabels map[string]string, proxyType string) map[string]string {
+	sel := map[string]string{
+		labelPromProxyType:       proxyType,
+		labelPromProxyParentName: proxyLabels[LabelParentName],
+	}
+	// Include namespace label for proxies created for a namespaced type.
+	if isNamespacedProxyType(proxyType) {
+		sel[labelPromProxyParentNamespace] = proxyLabels[LabelParentNamespace]
+	}
+	return sel
+}
+
+// serviceMonitorTemplate returns a base ServiceMonitor type that, when converted to Unstructured, is a valid type that
+// can be used in kube API server calls via the c/r client.
+func serviceMonitorTemplate(name, ns string) *ServiceMonitor {
+	return &ServiceMonitor{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ServiceMonitor",
+			APIVersion: "monitoring.coreos.com/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: ns,
+		},
+	}
+}
+
+type metricsOpts struct {
+	proxyStsName string            // name of StatefulSet for proxy
+	tsNamespace  string            // namespace in which Tailscale is installed
+	proxyLabels  map[string]string // labels of the proxy StatefulSet
+	proxyType    string
+}
+
+func isNamespacedProxyType(typ string) bool {
+	return typ == proxyTypeIngressResource || typ == proxyTypeIngressService
+}
@@ -24,8 +24,11 @@
     discoveryv1 "k8s.io/api/discovery/v1"
     networkingv1 "k8s.io/api/networking/v1"
     rbacv1 "k8s.io/api/rbac/v1"
+    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+    "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/rest"
+    toolscache "k8s.io/client-go/tools/cache"
     "sigs.k8s.io/controller-runtime/pkg/builder"
     "sigs.k8s.io/controller-runtime/pkg/cache"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -239,21 +242,29 @@ func runReconcilers(opts reconcilerOpts) {
    nsFilter := cache.ByObject{
        Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(),
    }
+   // We watch the ServiceMonitor CRD to ensure that reconcilers are re-triggered if user's workflows result in the
+   // ServiceMonitor CRD applied after some of our resources that define ServiceMonitor creation. This selector
+   // ensures that we only watch the ServiceMonitor CRD and that we don't cache full contents of it.
+   serviceMonitorSelector := cache.ByObject{
+       Field:     fields.SelectorFromSet(fields.Set{"metadata.name": serviceMonitorCRD}),
+       Transform: crdTransformer(startlog),
+   }
    mgrOpts := manager.Options{
        // TODO (irbekrm): stricter filtering what we watch/cache/call
        // reconcilers on. c/r by default starts a watch on any
        // resources that we GET via the controller manager's client.
        Cache: cache.Options{
            ByObject: map[client.Object]cache.ByObject{
                &corev1.Secret{}:             nsFilter,
                &corev1.ServiceAccount{}:     nsFilter,
                &corev1.Pod{}:                nsFilter,
                &corev1.ConfigMap{}:          nsFilter,
                &appsv1.StatefulSet{}:        nsFilter,
                &appsv1.Deployment{}:         nsFilter,
                &discoveryv1.EndpointSlice{}: nsFilter,
                &rbacv1.Role{}:               nsFilter,
                &rbacv1.RoleBinding{}:        nsFilter,
+               &apiextensionsv1.CustomResourceDefinition{}: serviceMonitorSelector,
            },
        },
        Scheme: tsapi.GlobalScheme,
@@ -422,8 +433,13 @@ func runReconcilers(opts reconcilerOpts) {
        startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err)
    }
 
+   // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that
+   // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get
+   // reconciled if the CRD is applied at a later point.
+   serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log))
    err = builder.ControllerManagedBy(mgr).
        For(&tsapi.ProxyClass{}).
+       Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter).
        Complete(&ProxyClassReconciler{
            Client:   mgr.GetClient(),
            recorder: eventRecorder,
@@ -1018,6 +1034,49 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns
    }
 }
 
+// proxyClassesWithServiceMonitor returns an event handler that, given that the event is for the Prometheus
+// ServiceMonitor CRD, returns all ProxyClasses that define that a ServiceMonitor should be created.
+func proxyClassesWithServiceMonitor(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
+   return func(ctx context.Context, o client.Object) []reconcile.Request {
+       crd, ok := o.(*apiextensionsv1.CustomResourceDefinition)
+       if !ok {
+           logger.Debugf("[unexpected] ServiceMonitor CRD handler received an object that is not a CustomResourceDefinition")
+           return nil
+       }
+       if crd.Name != serviceMonitorCRD {
+           logger.Debugf("[unexpected] ServiceMonitor CRD handler received an unexpected CRD %q", crd.Name)
+           return nil
+       }
+       pcl := &tsapi.ProxyClassList{}
+       if err := cl.List(ctx, pcl); err != nil {
+           logger.Debugf("[unexpected] error listing ProxyClasses: %v", err)
+           return nil
+       }
+       reqs := make([]reconcile.Request, 0)
+       for _, pc := range pcl.Items {
+           if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable {
+               reqs = append(reqs, reconcile.Request{
+                   NamespacedName: types.NamespacedName{Namespace: pc.Namespace, Name: pc.Name},
+               })
+           }
+       }
+       return reqs
+   }
+}
+
+// crdTransformer gets called before a CRD is stored to c/r cache, it removes the CRD spec to reduce memory consumption.
+func crdTransformer(log *zap.SugaredLogger) toolscache.TransformFunc {
+   return func(o any) (any, error) {
+       crd, ok := o.(*apiextensionsv1.CustomResourceDefinition)
+       if !ok {
+           log.Infof("[unexpected] CRD transformer called for a non-CRD type")
+           return crd, nil
+       }
+       crd.Spec = apiextensionsv1.CustomResourceDefinitionSpec{}
+       return crd, nil
+   }
+}
+
 // indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The
 // index is used a list filter.
 func indexEgressServices(o client.Object) []string {
@@ -15,6 +15,7 @@
    dockerref "github.com/distribution/reference"
    "go.uber.org/zap"
    corev1 "k8s.io/api/core/v1"
+   apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    apivalidation "k8s.io/apimachinery/pkg/api/validation"
@@ -95,7 +96,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re
    pcr.mu.Unlock()
 
    oldPCStatus := pc.Status.DeepCopy()
-   if errs := pcr.validate(pc); errs != nil {
+   if errs := pcr.validate(ctx, pc); errs != nil {
        msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error())
        pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg)
        tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger)
@@ -111,7 +112,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re
    return reconcile.Result{}, nil
 }
 
-func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations field.ErrorList) {
+func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) {
    if sts := pc.Spec.StatefulSet; sts != nil {
        if len(sts.Labels) > 0 {
            if errs := metavalidation.ValidateLabels(sts.Labels, field.NewPath(".spec.statefulSet.labels")); errs != nil {
@@ -167,6 +168,16 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel
            }
        }
    }
+   if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable {
+       found, err := hasServiceMonitorCRD(ctx, pcr.Client)
+       if err != nil {
+           pcr.logger.Infof("[unexpected]: error retrieving %q CRD: %v", serviceMonitorCRD, err)
+           // best effort validation - don't error out here
+       } else if !found {
+           msg := fmt.Sprintf("ProxyClass defines that a ServiceMonitor custom resource should be created, but %q CRD was not found", serviceMonitorCRD)
+           violations = append(violations, field.TypeInvalid(field.NewPath("spec", "metrics", "serviceMonitor"), "enable", msg))
+       }
+   }
    // We do not validate embedded fields (security context, resource
    // requirements etc) as we inherit upstream validation for those fields.
    // Invalid values would get rejected by upstream validations at apply
@@ -174,6 +185,16 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel
    return violations
 }
 
+func hasServiceMonitorCRD(ctx context.Context, cl client.Client) (bool, error) {
+   sm := &apiextensionsv1.CustomResourceDefinition{}
+   if err := cl.Get(ctx, types.NamespacedName{Name: serviceMonitorCRD}, sm); apierrors.IsNotFound(err) {
+       return false, nil
+   } else if err != nil {
+       return false, err
+   }
+   return true, nil
+}
+
 // maybeCleanup removes tailscale.com finalizer and ensures that the ProxyClass
 // is no longer counted towards k8s_proxyclass_resources.
 func (pcr *ProxyClassReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, pc *tsapi.ProxyClass) error {
@@ -8,10 +8,12 @@
 package main
 
 import (
+   "context"
    "testing"
    "time"
 
    "go.uber.org/zap"
+   apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
@@ -134,6 +136,25 @@ func TestProxyClass(t *testing.T) {
        "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."}
    expectReconciled(t, pcr, "", "test")
    expectEvents(t, fr, expectedEvents)
+
+   // 6. A ProxyClass with ServiceMonitor enabled and in a cluster that has not ServiceMonitor CRD is invalid
+   pc.Spec.Metrics = &tsapi.Metrics{Enable: true, ServiceMonitor: &tsapi.ServiceMonitor{Enable: true}}
+   mustUpdate(t, fc, "", "test", func(proxyClass *tsapi.ProxyClass) {
+       proxyClass.Spec = pc.Spec
+   })
+   expectReconciled(t, pcr, "", "test")
+   msg = `ProxyClass is not valid: spec.metrics.serviceMonitor: Invalid value: "enable": ProxyClass defines that a ServiceMonitor custom resource should be created, but "servicemonitors.monitoring.coreos.com" CRD was not found`
+   tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar())
+   expectEqual(t, fc, pc, nil)
+   expectedEvent = "Warning ProxyClassInvalid " + msg
+   expectEvents(t, fr, []string{expectedEvent})
+
+   // 7. A ProxyClass with ServiceMonitor enabled and in a cluster that does have the ServiceMonitor CRD is valid
+   crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+   mustCreate(t, fc, crd)
+   expectReconciled(t, pcr, "", "test")
+   tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar())
+   expectEqual(t, fc, pc, nil)
 }
 
 func TestValidateProxyClass(t *testing.T) {
@@ -180,7 +201,7 @@ func TestValidateProxyClass(t *testing.T) {
    } {
        t.Run(name, func(t *testing.T) {
            pcr := &ProxyClassReconciler{}
-           err := pcr.validate(tc.pc)
+           err := pcr.validate(context.Background(), tc.pc)
            valid := err == nil
            if valid != tc.valid {
                t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err)
@@ -259,6 +259,15 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
    }); err != nil {
        return fmt.Errorf("error provisioning StatefulSet: %w", err)
    }
+   mo := &metricsOpts{
+       tsNamespace:  r.tsNamespace,
+       proxyStsName: pg.Name,
+       proxyLabels:  pgLabels(pg.Name, nil),
+       proxyType:    "proxygroup",
+   }
+   if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil {
+       return fmt.Errorf("error reconciling metrics resources: %w", err)
+   }
 
    if err := r.cleanupDanglingResources(ctx, pg); err != nil {
        return fmt.Errorf("error cleaning up dangling resources: %w", err)
@@ -327,6 +336,14 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy
        }
    }
 
+   mo := &metricsOpts{
+       proxyLabels: pgLabels(pg.Name, nil),
+       tsNamespace: r.tsNamespace,
+       proxyType:   "proxygroup"}
+   if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil {
+       return false, fmt.Errorf("error cleaning up metrics resources: %w", err)
+   }
+
    logger.Infof("cleaned up ProxyGroup resources")
    r.mu.Lock()
    r.proxyGroups.Remove(pg.UID)
@@ -17,6 +17,7 @@
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
+   apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/record"
    "sigs.k8s.io/controller-runtime/pkg/client"
@@ -76,6 +77,13 @@ func TestProxyGroup(t *testing.T) {
        l:     zl.Sugar(),
        clock: cl,
    }
+   crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+   opts := configOpts{
+       proxyType:          "proxygroup",
+       stsName:            pg.Name,
+       parentType:         "proxygroup",
+       tailscaleNamespace: "tailscale",
+   }
 
    t.Run("proxyclass_not_ready", func(t *testing.T) {
        expectReconciled(t, reconciler, "", pg.Name)
@@ -190,6 +198,27 @@ func TestProxyGroup(t *testing.T) {
        expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74")
    })
+
+   t.Run("enable_metrics", func(t *testing.T) {
+       pc.Spec.Metrics = &tsapi.Metrics{Enable: true}
+       mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
+           p.Spec = pc.Spec
+       })
+       expectReconciled(t, reconciler, "", pg.Name)
+       expectEqual(t, fc, expectedMetricsService(opts), nil)
+   })
+   t.Run("enable_service_monitor_no_crd", func(t *testing.T) {
+       pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true}
+       mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
+           p.Spec.Metrics = pc.Spec.Metrics
+       })
+       expectReconciled(t, reconciler, "", pg.Name)
+   })
+   t.Run("create_crd_expect_service_monitor", func(t *testing.T) {
+       mustCreate(t, fc, crd)
+       expectReconciled(t, reconciler, "", pg.Name)
+       expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts))
+   })
 
    t.Run("delete_and_cleanup", func(t *testing.T) {
        if err := fc.Delete(context.Background(), pg); err != nil {
            t.Fatal(err)
@@ -197,7 +226,7 @@ func TestProxyGroup(t *testing.T) {
 
        expectReconciled(t, reconciler, "", pg.Name)
 
-       expectMissing[tsapi.Recorder](t, fc, "", pg.Name)
+       expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name)
        if expected := 0; reconciler.proxyGroups.Len() != expected {
            t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len())
        }
@@ -206,6 +235,7 @@ func TestProxyGroup(t *testing.T) {
        if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-1", "nodeid-2", "nodeid-0"}); diff != "" {
            t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff)
        }
+       expectMissing[corev1.Service](t, reconciler, "tailscale", metricsResourceName(pg.Name))
        // The fake client does not clean up objects whose owner has been
        // deleted, so we can't test for the owned resources getting deleted.
    })
@@ -94,6 +94,12 @@
    podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn"
    // podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents.
    podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash"
+
+   proxyTypeEgress          = "egress_service"
+   proxyTypeIngressService  = "ingress_service"
+   proxyTypeIngressResource = "ingress_resource"
+   proxyTypeConnector       = "connector"
+   proxyTypeProxyGroup      = "proxygroup"
 )
 
 var (
@@ -122,6 +128,8 @@ type tailscaleSTSConfig struct {
    Hostname string
    Tags     []string // if empty, use defaultTags
 
+   proxyType string
+
    // Connector specifies a configuration of a Connector instance if that's
    // what this StatefulSet should be created for.
    Connector *connector
@@ -197,14 +205,22 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga
 	if err != nil {
 		return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
 	}
+	mo := &metricsOpts{
+		proxyStsName: hsvc.Name,
+		tsNamespace:  hsvc.Namespace,
+		proxyLabels:  hsvc.Labels,
+		proxyType:    sts.proxyType,
+	}
+	if err = reconcileMetricsResources(ctx, logger, mo, sts.ProxyClass, a.Client); err != nil {
+		return nil, fmt.Errorf("failed to ensure metrics resources: %w", err)
+	}
 	return hsvc, nil
 }

 // Cleanup removes all resources associated that were created by Provision with
 // the given labels. It returns true when all resources have been removed,
 // otherwise it returns false and the caller should retry later.
-func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string) (done bool, _ error) {
+func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) {
 	// Need to delete the StatefulSet first, and delete it with foreground
 	// cascading deletion. That way, the pod that's writing to the Secret will
 	// stop running before we start looking at the Secret's contents, and
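For orientation, reconcileMetricsResources is expected to ensure a per-proxy metrics Service (see expectedMetricsService in the test changes further down): a ClusterIP Service named <proxy-statefulset>-metrics in the operator's namespace, exposing port 9002 as "metrics". A hedged sketch of that object; the helper name and parameters are illustrative, not the operator's actual code:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildMetricsService is a hypothetical illustration of the Service shape the
// operator ensures when metrics are enabled: <proxy-statefulset>-metrics in the
// operator's namespace, selecting the proxy Pod and exposing port 9002.
func buildMetricsService(stsName, operatorNamespace string, labels, selector map[string]string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      stsName + "-metrics",
			Namespace: operatorNamespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			Selector: selector,
			Type:     corev1.ServiceTypeClusterIP,
			Ports:    []corev1.ServicePort{{Name: "metrics", Port: 9002, Protocol: corev1.ProtocolTCP}},
		},
	}
}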
@@ -257,6 +273,14 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare
 			return false, err
 		}
 	}
+	mo := &metricsOpts{
+		proxyLabels: labels,
+		tsNamespace: a.operatorNamespace,
+		proxyType:   typ,
+	}
+	if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil {
+		return false, fmt.Errorf("error cleaning up metrics resources: %w", err)
+	}
 	return true, nil
 }

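maybeCleanupMetricsResources itself is not shown in this excerpt. A rough sketch of the cleanup shape assumed here, namely deleting the proxy's metrics Service and treating NotFound as success, with the ServiceMonitor expected to be garbage collected through its owner reference on that Service; all names below are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cleanupMetricsService is a hypothetical stand-in for the metrics cleanup step.
// It deletes the proxy's metrics Service by name and ignores a missing object;
// the owned ServiceMonitor would then be removed by Kubernetes garbage collection.
func cleanupMetricsService(ctx context.Context, c client.Client, stsName, operatorNamespace string) error {
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{
		Name:      stsName + "-metrics",
		Namespace: operatorNamespace,
	}}
	if err := c.Delete(ctx, svc); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}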
@@ -152,7 +152,12 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
 		return nil
 	}

-	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc")); err != nil {
+	proxyTyp := proxyTypeEgress
+	if a.shouldExpose(svc) {
+		proxyTyp = proxyTypeIngressService
+	}
+
+	if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil {
 		return fmt.Errorf("failed to cleanup: %w", err)
 	} else if !done {
 		logger.Debugf("cleanup not done yet, waiting for next reconcile")
@@ -256,6 +261,10 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
 		ChildResourceLabels: crl,
 		ProxyClassName:      proxyClass,
 	}
+	sts.proxyType = proxyTypeEgress
+	if a.shouldExpose(svc) {
+		sts.proxyType = proxyTypeIngressService
+	}

 	a.mu.Lock()
 	if a.shouldExposeClusterIP(svc) {
@@ -8,6 +8,7 @@
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/netip"
 	"reflect"
 	"strings"
@@ -21,6 +22,7 @@
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -39,7 +41,10 @@ type configOpts struct {
 	secretName string
 	hostname string
 	namespace string
+	tailscaleNamespace string
+	namespaced bool
 	parentType string
+	proxyType string
 	priorityClassName string
 	firewallMode string
 	tailnetTargetIP string
@@ -56,6 +61,7 @@ type configOpts struct {
 	app string
 	shouldRemoveAuthKey bool
 	secretExtraData map[string][]byte
+	enableMetrics bool
 }

 func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet {
@@ -150,6 +156,29 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
 		Name:  "TS_INTERNAL_APP",
 		Value: opts.app,
 	})
+	if opts.enableMetrics {
+		tsContainer.Env = append(tsContainer.Env,
+			corev1.EnvVar{
+				Name:  "TS_DEBUG_ADDR_PORT",
+				Value: "$(POD_IP):9001"},
+			corev1.EnvVar{
+				Name:  "TS_TAILSCALED_EXTRA_ARGS",
+				Value: "--debug=$(TS_DEBUG_ADDR_PORT)",
+			},
+			corev1.EnvVar{
+				Name:  "TS_LOCAL_ADDR_PORT",
+				Value: "$(POD_IP):9002",
+			},
+			corev1.EnvVar{
+				Name:  "TS_ENABLE_METRICS",
+				Value: "true",
+			},
+		)
+		tsContainer.Ports = append(tsContainer.Ports,
+			corev1.ContainerPort{Name: "debug", ContainerPort: 9001, Protocol: "TCP"},
+			corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"},
+		)
+	}
 	ss := &appsv1.StatefulSet{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "StatefulSet",
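These environment variables are what expose the metrics endpoint itself: tailscaled's debug listener on $(POD_IP):9001 and the client metrics endpoint on $(POD_IP):9002. A small illustrative helper for scraping that endpoint; the URL shape comes from the ProxyClass docs, the function itself is not part of this commit:

package example

import (
	"fmt"
	"io"
	"net/http"
)

// fetchProxyMetrics is an illustrative helper: it fetches the Tailscale metrics a
// proxy Pod serves at <pod-ip>:9002/metrics when ProxyClass metrics are enabled.
func fetchProxyMetrics(podIP string) (string, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s:9002/metrics", podIP))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}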
@@ -241,6 +270,29 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
 			{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"},
 		},
 	}
+	if opts.enableMetrics {
+		tsContainer.Env = append(tsContainer.Env,
+			corev1.EnvVar{
+				Name:  "TS_DEBUG_ADDR_PORT",
+				Value: "$(POD_IP):9001"},
+			corev1.EnvVar{
+				Name:  "TS_TAILSCALED_EXTRA_ARGS",
+				Value: "--debug=$(TS_DEBUG_ADDR_PORT)",
+			},
+			corev1.EnvVar{
+				Name:  "TS_LOCAL_ADDR_PORT",
+				Value: "$(POD_IP):9002",
+			},
+			corev1.EnvVar{
+				Name:  "TS_ENABLE_METRICS",
+				Value: "true",
+			},
+		)
+		tsContainer.Ports = append(tsContainer.Ports, corev1.ContainerPort{
+			Name: "debug", ContainerPort: 9001, Protocol: "TCP"},
+			corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"},
+		)
+	}
 	volumes := []corev1.Volume{
 		{
 			Name: "tailscaledconfig",
@@ -335,6 +387,87 @@ func expectedHeadlessService(name string, parentType string) *corev1.Service {
 		},
 	}
 }

+func expectedMetricsService(opts configOpts) *corev1.Service {
+	labels := metricsLabels(opts)
+	selector := map[string]string{
+		"tailscale.com/managed":              "true",
+		"tailscale.com/parent-resource":      "test",
+		"tailscale.com/parent-resource-type": opts.parentType,
+	}
+	if opts.namespaced {
+		selector["tailscale.com/parent-resource-ns"] = opts.namespace
+	}
+	return &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      metricsResourceName(opts.stsName),
+			Namespace: opts.tailscaleNamespace,
+			Labels:    labels,
+		},
+		Spec: corev1.ServiceSpec{
+			Selector: selector,
+			Type:     corev1.ServiceTypeClusterIP,
+			Ports:    []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}},
+		},
+	}
+}
+
+func metricsLabels(opts configOpts) map[string]string {
+	promJob := fmt.Sprintf("ts_%s_default_test", opts.proxyType)
+	if !opts.namespaced {
+		promJob = fmt.Sprintf("ts_%s_test", opts.proxyType)
+	}
+	labels := map[string]string{
+		"tailscale.com/managed":        "true",
+		"tailscale.com/metrics-target": opts.stsName,
+		"ts_prom_job":                  promJob,
+		"ts_proxy_type":                opts.proxyType,
+		"ts_proxy_parent_name":         "test",
+	}
+	if opts.namespaced {
+		labels["ts_proxy_parent_namespace"] = "default"
+	}
+	return labels
+}
+
+func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstructured {
+	t.Helper()
+	labels := metricsLabels(opts)
+	name := metricsResourceName(opts.stsName)
+	sm := &ServiceMonitor{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            name,
+			Namespace:       opts.tailscaleNamespace,
+			Labels:          labels,
+			ResourceVersion: "1",
+			OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}},
+		},
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ServiceMonitor",
+			APIVersion: "monitoring.coreos.com/v1",
+		},
+		Spec: ServiceMonitorSpec{
+			Selector: metav1.LabelSelector{MatchLabels: labels},
+			Endpoints: []ServiceMonitorEndpoint{{
+				Port: "metrics",
+			}},
+			NamespaceSelector: ServiceMonitorNamespaceSelector{
+				MatchNames: []string{opts.tailscaleNamespace},
+			},
+			JobLabel: "ts_prom_job",
+			TargetLabels: []string{
+				"ts_proxy_parent_name",
+				"ts_proxy_parent_namespace",
+				"ts_proxy_type",
+			},
+		},
+	}
+	u, err := serviceMonitorToUnstructured(sm)
+	if err != nil {
+		t.Fatalf("error converting ServiceMonitor to unstructured: %v", err)
+	}
+	return u
+}
+
 func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Secret {
 	t.Helper()
 	s := &corev1.Secret{
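expectedServiceMonitor depends on serviceMonitorToUnstructured, which is outside this excerpt. One plausible implementation, assuming the conversion goes through runtime.DefaultUnstructuredConverter; the operator may do this differently:

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// toUnstructured is an assumed implementation: it converts a typed object (such
// as the test's ServiceMonitor struct) into an *unstructured.Unstructured so it
// can be created with a client without importing the Prometheus operator's API types.
func toUnstructured(obj any) (*unstructured.Unstructured, error) {
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
	if err != nil {
		return nil, err
	}
	return &unstructured.Unstructured{Object: content}, nil
}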
@@ -502,6 +635,21 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want
 	}
 }

+func expectEqualUnstructured(t *testing.T, client client.Client, want *unstructured.Unstructured) {
+	t.Helper()
+	got := &unstructured.Unstructured{}
+	got.SetGroupVersionKind(want.GroupVersionKind())
+	if err := client.Get(context.Background(), types.NamespacedName{
+		Name:      want.GetName(),
+		Namespace: want.GetNamespace(),
+	}, got); err != nil {
+		t.Fatalf("getting %q: %v", want.GetName(), err)
+	}
+	if diff := cmp.Diff(got, want); diff != "" {
+		t.Fatalf("unexpected contents of Unstructured (-got +want):\n%s", diff)
+	}
+}
+
 func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string) {
 	t.Helper()
 	obj := O(new(T))
go.mod

@@ -396,7 +396,7 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1
 	howett.net/plist v1.0.0 // indirect
-	k8s.io/apiextensions-apiserver v0.30.3 // indirect
+	k8s.io/apiextensions-apiserver v0.30.3
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
@@ -326,7 +326,8 @@ _Appears in:_

 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics<br />at <pod-ip>:9002/metrics.<br />In 1.78.x and 1.80.x, this field also serves as the default value for<br />.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both<br />fields will independently default to false.<br />Defaults to false. | | |
+| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics<br />at <pod-ip>:9002/metrics.<br />A metrics Service named <proxy-statefulset>-metrics will also be created in the operator's namespace and will<br />serve the metrics at <service-ip>:9002/metrics.<br />In 1.78.x and 1.80.x, this field also serves as the default value for<br />.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both<br />fields will independently default to false.<br />Defaults to false. | | |
+| `serviceMonitor` _[ServiceMonitor](#servicemonitor)_ | Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.<br />The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.<br />The ingested metrics for each Service monitor will have labels to identify the proxy:<br />ts_proxy_type: ingress_service\|ingress_resource\|connector\|proxygroup<br />ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)<br />ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)<br />job: ts_<proxy type>_[<parent namespace>]_<parent_name> | | |


 #### Name
@@ -836,6 +837,22 @@ _Appears in:_
 | `name` _string_ | The name of a Kubernetes Secret in the operator's namespace that contains<br />credentials for writing to the configured bucket. Each key-value pair<br />from the secret's data will be mounted as an environment variable. It<br />should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if<br />using a static access key. | | |


+#### ServiceMonitor
+
+
+
+
+_Appears in:_
+- [Metrics](#metrics)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `enable` _boolean_ | If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. | | |
+
+
 #### StatefulSet

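For reference, a ProxyClass that opts into both the metrics Service and the ServiceMonitor could be built as below. This is a minimal sketch using the Metrics and ServiceMonitor types added in this commit; the tsapi import path/alias and the object name are assumptions:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

// metricsProxyClass is a minimal example (import path assumed): a ProxyClass with
// metrics and the ServiceMonitor enabled. The CEL rule on Metrics rejects
// serviceMonitor.enable=true unless enable is also true.
func metricsProxyClass() *tsapi.ProxyClass {
	return &tsapi.ProxyClass{
		ObjectMeta: metav1.ObjectMeta{Name: "metrics"},
		Spec: tsapi.ProxyClassSpec{
			Metrics: &tsapi.Metrics{
				Enable:         true,
				ServiceMonitor: &tsapi.ServiceMonitor{Enable: true},
			},
		},
	}
}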
@@ -10,6 +10,7 @@

 	"tailscale.com/k8s-operator/apis"

+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -39,12 +40,18 @@ func init() {
 	localSchemeBuilder.Register(addKnownTypes)

 	GlobalScheme = runtime.NewScheme()
+	// Add core types
 	if err := scheme.AddToScheme(GlobalScheme); err != nil {
 		panic(fmt.Sprintf("failed to add k8s.io scheme: %s", err))
 	}
+	// Add tailscale.com types
 	if err := AddToScheme(GlobalScheme); err != nil {
 		panic(fmt.Sprintf("failed to add tailscale.com scheme: %s", err))
 	}
+	// Add apiextensions types (CustomResourceDefinitions/CustomResourceDefinitionLists)
+	if err := apiextensionsv1.AddToScheme(GlobalScheme); err != nil {
+		panic(fmt.Sprintf("failed to add apiextensions.k8s.io scheme: %s", err))
+	}
 }

 // Adds the list of known types to api.Scheme.
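Registering apiextensionsv1 in GlobalScheme lets operator code read CustomResourceDefinition objects with the same typed client, for example to check whether the ServiceMonitor CRD is installed before trying to create one. A hedged sketch; whether and how the operator actually performs this lookup is not shown in this excerpt:

package example

import (
	"context"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// serviceMonitorCRDExists is an illustrative check for the Prometheus operator's
// ServiceMonitor CRD. It requires apiextensionsv1 to be registered in the
// client's scheme, which the init() change above provides.
func serviceMonitorCRDExists(ctx context.Context, c client.Client) (bool, error) {
	crd := &apiextensionsv1.CustomResourceDefinition{}
	err := c.Get(ctx, types.NamespacedName{Name: "servicemonitors.monitoring.coreos.com"}, crd)
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}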
@@ -161,9 +161,12 @@ type Pod struct {
 	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
 }

+// +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled"
 type Metrics struct {
 	// Setting enable to true will make the proxy serve Tailscale metrics
 	// at <pod-ip>:9002/metrics.
+	// A metrics Service named <proxy-statefulset>-metrics will also be created in the operator's namespace and will
+	// serve the metrics at <service-ip>:9002/metrics.
 	//
 	// In 1.78.x and 1.80.x, this field also serves as the default value for
 	// .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
@@ -171,6 +174,20 @@ type Metrics struct {
 	//
 	// Defaults to false.
 	Enable bool `json:"enable"`
+	// Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+	// The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+	// The ingested metrics for each Service monitor will have labels to identify the proxy:
+	// ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+	// ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+	// ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+	// job: ts_<proxy type>_[<parent namespace>]_<parent_name>
+	// +optional
+	ServiceMonitor *ServiceMonitor `json:"serviceMonitor"`
+}
+
+type ServiceMonitor struct {
+	// If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+	Enable bool `json:"enable"`
 }

 type Container struct {
@@ -319,6 +319,11 @@ func (in *Env) DeepCopy() *Env {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Metrics) DeepCopyInto(out *Metrics) {
 	*out = *in
+	if in.ServiceMonitor != nil {
+		in, out := &in.ServiceMonitor, &out.ServiceMonitor
+		*out = new(ServiceMonitor)
+		**out = **in
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics.
@@ -526,7 +531,7 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) {
 	if in.Metrics != nil {
 		in, out := &in.Metrics, &out.Metrics
 		*out = new(Metrics)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	if in.TailscaleConfig != nil {
 		in, out := &in.TailscaleConfig, &out.TailscaleConfig
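The switch from **out = **in to (*in).DeepCopyInto(*out) matters because Metrics now carries a pointer field: a plain struct copy would leave both ProxyClassSpec copies sharing one *ServiceMonitor. A tiny self-contained illustration of the difference, with renamed types to avoid clashing with the generated code:

package example

type monitorCfg struct{ Enable bool }

type metricsCfg struct {
	Enable  bool
	Monitor *monitorCfg
}

// shallowCopy copies the struct value only: both copies share the same Monitor pointer.
func shallowCopy(in *metricsCfg) *metricsCfg {
	out := new(metricsCfg)
	*out = *in
	return out
}

// deepCopy also clones the pointed-to value, which is what the regenerated
// DeepCopyInto for Metrics now does for its ServiceMonitor field.
func deepCopy(in *metricsCfg) *metricsCfg {
	out := new(metricsCfg)
	*out = *in
	if in.Monitor != nil {
		m := *in.Monitor
		out.Monitor = &m
	}
	return out
}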
@@ -991,6 +996,21 @@ func (in *S3Secret) DeepCopy() *S3Secret {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor.
+func (in *ServiceMonitor) DeepCopy() *ServiceMonitor {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceMonitor)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
 	*out = *in