commit 5156ec6a3b
parent 436794cf7a

    WIP
@@ -61,6 +61,8 @@
 // and not `tailscale up` or `tailscale set`.
 // The config file contents are currently read once on container start.
 // NB: This env var is currently experimental and the logic will likely change!
+// - TS_EXPERIMENTAL_SERVICES_CONFIG_PATH
+// Path where config for Services served by this proxy can be found.
 // - EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS: if set to true
 // and if this containerboot instance is an L7 ingress proxy (created by
 // the Kubernetes operator), set up rules to allow proxying cluster traffic,
@@ -113,11 +115,14 @@ import (
 	kubeutils "tailscale.com/k8s-operator"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/logger"
-	"tailscale.com/types/ptr"
 	"tailscale.com/util/deephash"
 	"tailscale.com/util/linuxfw"
 )
 
+const (
+	kubeletMountedConfigLn = "..data"
+)
+
 func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) {
 	if defaultBool("TS_TEST_FAKE_NETFILTER", false) {
 		return linuxfw.NewFakeIPTablesRunner(), nil
@@ -133,6 +138,7 @@ func main() {
 		Hostname:           defaultEnv("TS_HOSTNAME", ""),
 		Routes:             defaultEnvStringPointer("TS_ROUTES"),
 		ServeConfigPath:    defaultEnv("TS_SERVE_CONFIG", ""),
+		ServicesConfigPath: defaultEnv("TS_EXPERIMENTAL_SERVICES_CONFIG_PATH", ""),
 		ProxyTargetIP:      defaultEnv("TS_DEST_IP", ""),
 		ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""),
 		TailnetTargetIP:    defaultEnv("TS_TAILNET_TARGET_IP", ""),
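The `defaultEnv` helper these settings rely on is defined elsewhere in containerboot and not shown in this diff; a minimal sketch of its assumed semantics (return the env var's value, or the fallback when unset):

```go
// Sketch only — containerboot's actual helper may differ in detail.
func defaultEnv(name, defVal string) string {
	if v, ok := os.LookupEnv(name); ok {
		return v
	}
	return defVal
}
```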
@@ -325,13 +331,13 @@ authLoop:
 	}
 
 	var (
-		wantProxy         = cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress
+		wantProxy         = cfg.ServicesConfigPath != "" || cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress
 		wantDeviceInfo    = cfg.InKubernetes && cfg.KubeSecret != "" && cfg.KubernetesCanPatch
 		startupTasksDone  = false
 		currentIPs        deephash.Sum // tailscale IPs assigned to device
 		currentDeviceInfo deephash.Sum // device ID and fqdn
 
-		currentEgressIPs deephash.Sum
+		// currentEgressIPs deephash.Sum
 
 		addrs        []netip.Prefix
 		backendAddrs []net.IP
@@ -389,6 +395,11 @@ authLoop:
 
 	notifyChan := make(chan ipn.Notify)
 	errChan := make(chan error)
+	log.Printf("attempting to update service config...")
+	if err := updateServices(cfg, nfr); err != nil {
+		log.Printf("error updating services: %v", err)
+	}
+	log.Printf("ConfigMap update processed")
 	go func() {
 		for {
 			n, err := w.Next()
@@ -401,7 +412,53 @@ authLoop:
 		}
 	}()
 	var wg sync.WaitGroup
+	if cfg.ServicesConfigPath != "" {
+		// kubelet mounts configmap to a Pod using a series of symlinks, one of
+		// which is <mount-dir>/..data that Kubernetes recommends consumers to
+		// use if they need to monitor changes
+		// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
+		toWatch := filepath.Join(cfg.ServicesConfigPath, kubeletMountedConfigLn)
+		watcher, err := fsnotify.NewWatcher()
+		if err != nil {
+			log.Fatalf("error creating a new watcher for the mounted ConfigMap: %v", err)
+		}
+		log.Printf("will be watching services cm")
+		go func() {
+			defer watcher.Close()
+			for {
+				log.Printf("waiting for ConfigMap updates..")
+				select {
+				case <-ctx.Done():
+					log.Print("context cancelled, exiting ConfigMap watcher")
+					return
+				case event, ok := <-watcher.Events:
+					log.Printf("ConfigMap update received: %s", event)
+					if !ok {
+						log.Fatal("watcher finished; exiting")
+					}
+					if event.Name == toWatch {
+						log.Printf("update is for an event to watch: %s", event)
+						if err := updateServices(cfg, nfr); err != nil {
+							log.Printf("error updating services: %v", err)
+						}
+						log.Printf("ConfigMap update processed")
+					} else {
+						log.Printf("update is not for an event to watch: %s", event)
+					}
+				case err, ok := <-watcher.Errors:
+					if err != nil {
+						log.Fatalf("[unexpected] error watching configuration: %v", err)
+					}
+					if !ok {
+						log.Fatalf("[unexpected] errors watcher exited")
+					}
+				}
+			}
+		}()
+		if err = watcher.Add(cfg.ServicesConfigPath); err != nil {
+			log.Fatalf("failed setting up a watcher for the mounted ConfigMap: %v", err)
+		}
+	}
 runLoop:
 	for {
 		select {
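The watcher above keys off kubelet's atomic-writer layout: every ConfigMap update atomically re-points the `<mount-dir>/..data` symlink. As a standalone illustration of the same pattern, a minimal sketch (the mount path is hypothetical; real code should also drain watcher errors as the diff does):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	mountDir := "/config" // hypothetical ConfigMap mount path
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add(mountDir); err != nil {
		log.Fatal(err)
	}
	// kubelet re-points <mount-dir>/..data atomically on every ConfigMap
	// update, so an event for that path signals new contents.
	target := filepath.Join(mountDir, "..data")
	for ev := range w.Events {
		if ev.Name == target {
			log.Printf("ConfigMap contents updated: %s", ev)
		}
	}
}
```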
@@ -423,111 +480,111 @@ runLoop:
 			// whereupon we'll go through initial auth again.
 			log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
 			}
-			if n.NetMap != nil {
-				addrs = n.NetMap.SelfNode.Addresses().AsSlice()
-				newCurrentIPs := deephash.Hash(&addrs)
-				ipsHaveChanged := newCurrentIPs != currentIPs
+			// if n.NetMap != nil {
+			// addrs = n.NetMap.SelfNode.Addresses().AsSlice()
+			// newCurrentIPs := deephash.Hash(&addrs)
+			// ipsHaveChanged := newCurrentIPs != currentIPs
 
-				if cfg.TailnetTargetFQDN != "" {
-					var (
-						egressAddrs          []netip.Prefix
-						newCurentEgressIPs   deephash.Sum
-						egressIPsHaveChanged bool
-						node                 tailcfg.NodeView
-						nodeFound            bool
-					)
-					for _, n := range n.NetMap.Peers {
-						if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) {
-							node = n
-							nodeFound = true
-							break
-						}
-					}
-					if !nodeFound {
-						log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN)
-						break
-					}
-					egressAddrs = node.Addresses().AsSlice()
-					newCurentEgressIPs = deephash.Hash(&egressAddrs)
-					egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs
-					if egressIPsHaveChanged && len(egressAddrs) != 0 {
-						for _, egressAddr := range egressAddrs {
-							ea := egressAddr.Addr()
-							// TODO (irbekrm): make it work for IPv6 too.
-							if ea.Is6() {
-								log.Println("Not installing egress forwarding rules for IPv6 as this is currently not supported")
-								continue
-							}
-							log.Printf("Installing forwarding rules for destination %v", ea.String())
-							if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
-								log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
-							}
-						}
-					}
-					currentEgressIPs = newCurentEgressIPs
-				}
-				if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
-					log.Printf("Installing proxy rules")
-					if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
-						log.Fatalf("installing ingress proxy rules: %v", err)
-					}
-				}
-				if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
-					newBackendAddrs, err := resolveDNS(ctx, cfg.ProxyTargetDNSName)
-					if err != nil {
-						log.Printf("[unexpected] error resolving DNS name %s: %v", cfg.ProxyTargetDNSName, err)
-						resetTimer(true)
-						continue
-					}
-					backendsHaveChanged := !(slices.EqualFunc(backendAddrs, newBackendAddrs, func(ip1 net.IP, ip2 net.IP) bool {
-						return slices.ContainsFunc(newBackendAddrs, func(ip net.IP) bool { return ip.Equal(ip1) })
-					}))
-					if backendsHaveChanged {
-						log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
-						if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
-							log.Fatalf("error installing ingress proxy rules: %v", err)
-						}
-					}
-					resetTimer(false)
-					backendAddrs = newBackendAddrs
-				}
-				if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 {
-					cd := n.NetMap.DNS.CertDomains[0]
-					prev := certDomain.Swap(ptr.To(cd))
-					if prev == nil || *prev != cd {
-						select {
-						case certDomainChanged <- true:
-						default:
-						}
-					}
-				}
-				if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
-					log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
-					if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
-						log.Fatalf("installing egress proxy rules: %v", err)
-					}
-				}
-				// If this is a L7 cluster ingress proxy (set up
-				// by Kubernetes operator) and proxying of
-				// cluster traffic to the ingress target is
-				// enabled, set up proxy rule each time the
-				// tailnet IPs of this node change (including
-				// the first time they become available).
-				if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
-					log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
-					if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
-						log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
-					}
-				}
-				currentIPs = newCurrentIPs
+			// if cfg.TailnetTargetFQDN != "" {
+			// var (
+			// egressAddrs []netip.Prefix
+			// newCurentEgressIPs deephash.Sum
+			// egressIPsHaveChanged bool
+			// node tailcfg.NodeView
+			// nodeFound bool
+			// )
+			// for _, n := range n.NetMap.Peers {
+			// if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) {
+			// node = n
+			// nodeFound = true
+			// break
+			// }
+			// }
+			// if !nodeFound {
+			// log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN)
+			// break
+			// }
+			// egressAddrs = node.Addresses().AsSlice()
+			// newCurentEgressIPs = deephash.Hash(&egressAddrs)
+			// egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs
+			// if egressIPsHaveChanged && len(egressAddrs) != 0 {
+			// for _, egressAddr := range egressAddrs {
+			// ea := egressAddr.Addr()
+			// // TODO (irbekrm): make it work for IPv6 too.
+			// if ea.Is6() {
+			// log.Println("Not installing egress forwarding rules for IPv6 as this is currently not supported")
+			// continue
+			// }
+			// log.Printf("Installing forwarding rules for destination %v", ea.String())
+			// if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
+			// log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
+			// }
+			// }
+			// }
+			// currentEgressIPs = newCurentEgressIPs
+			// }
+			// if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
+			// log.Printf("Installing proxy rules")
+			// if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
+			// log.Fatalf("installing ingress proxy rules: %v", err)
+			// }
+			// }
+			// if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
+			// newBackendAddrs, err := resolveDNS(ctx, cfg.ProxyTargetDNSName)
+			// if err != nil {
+			// log.Printf("[unexpected] error resolving DNS name %s: %v", cfg.ProxyTargetDNSName, err)
+			// resetTimer(true)
+			// continue
+			// }
+			// backendsHaveChanged := !(slices.EqualFunc(backendAddrs, newBackendAddrs, func(ip1 net.IP, ip2 net.IP) bool {
+			// return slices.ContainsFunc(newBackendAddrs, func(ip net.IP) bool { return ip.Equal(ip1) })
+			// }))
+			// if backendsHaveChanged {
+			// log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
+			// if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
+			// log.Fatalf("error installing ingress proxy rules: %v", err)
+			// }
+			// }
+			// resetTimer(false)
+			// backendAddrs = newBackendAddrs
+			// }
+			// if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 {
+			// cd := n.NetMap.DNS.CertDomains[0]
+			// prev := certDomain.Swap(ptr.To(cd))
+			// if prev == nil || *prev != cd {
+			// select {
+			// case certDomainChanged <- true:
+			// default:
+			// }
+			// }
+			// }
+			// if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
+			// log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
+			// if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
+			// log.Fatalf("installing egress proxy rules: %v", err)
+			// }
+			// }
+			// // If this is a L7 cluster ingress proxy (set up
+			// // by Kubernetes operator) and proxying of
+			// // cluster traffic to the ingress target is
+			// // enabled, set up proxy rule each time the
+			// // tailnet IPs of this node change (including
+			// // the first time they become available).
+			// if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
+			// log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
+			// if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
+			// log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
+			// }
+			// }
+			// currentIPs = newCurrentIPs
 
-				deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
-				if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(&currentDeviceInfo, &deviceInfo) {
-					if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
-						log.Fatalf("storing device ID in kube secret: %v", err)
-					}
-				}
-			}
+			// deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
+			// if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(&currentDeviceInfo, &deviceInfo) {
+			// if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
+			// log.Fatalf("storing device ID in kube secret: %v", err)
+			// }
+			// }
+			// }
 			if !startupTasksDone {
 				if (!wantProxy || currentIPs != deephash.Sum{}) && (!wantDeviceInfo || currentDeviceInfo != deephash.Sum{}) {
 					// This log message is used in tests to detect when all
@@ -697,6 +754,48 @@ func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient
 	return tsClient, cmd.Process, nil
 }
 
+func updateServices(cfg *settings, nfr linuxfw.NetfilterRunner) error {
+	if cfg.ServicesConfigPath == "" {
+		log.Print("no services config path set")
+		return nil
+	}
+	b, err := os.ReadFile(path.Join(cfg.ServicesConfigPath, "proxyConfig"))
+	if err != nil {
+		log.Printf("error reading in services config at path %s: %v", cfg.ServicesConfigPath, err)
+		return nil
+	}
+	proxyCfg := &kubeutils.ProxyConfig{}
+	if err := json.Unmarshal(b, proxyCfg); err != nil {
+		log.Printf("error unmarshalling proxy config: %v", err)
+		return nil
+	}
+	if len(proxyCfg.Services) == 0 {
+		log.Printf("No Services defined")
+		return nil
+	}
+
+	for name, svcFg := range proxyCfg.Services {
+		log.Printf("configuring Service for %s", name)
+		if svcFg.Ingress == nil || len(svcFg.Ingress.V4Backends) != 1 {
+			// This prototype only suppports ingress with one backend address
+			log.Printf("[unexpected] service is not ingress with a single backend address")
+			return nil
+		}
+		if len(svcFg.V4ServiceIPs) != 1 {
+			// This prototype only suppports ingress with one backend address
+			log.Printf("[unexpected] a single service IP expected, got %v", svcFg.V4ServiceIPs)
+			return nil
+		}
+		if err := nfr.AddDNATRule(svcFg.V4ServiceIPs[0], svcFg.Ingress.V4Backends[0]); err != nil {
+			return fmt.Errorf("installing ingress proxy rules: %w", err)
+		}
+		if err := nfr.ClampMSSToPMTU("tailscale0", svcFg.V4ServiceIPs[0]); err != nil {
+			return fmt.Errorf("installing ingress proxy rules: %w", err)
+		}
+	}
+	return nil
+}
+
 // tailscaledArgs uses cfg to construct the argv for tailscaled.
 func tailscaledArgs(cfg *settings) []string {
 	args := []string{"--socket=" + cfg.Socket}
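updateServices only dereferences a few fields of kubeutils.ProxyConfig (Services, V4ServiceIPs, Ingress.V4Backends); the type's real definition lives in tailscale.com/k8s-operator and is not part of this diff. A self-contained sketch of a plausible proxyConfig payload with matching structs (field names taken from the usage above, JSON keys and values hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

// Assumed shape mirroring the fields updateServices touches; the real
// kubeutils.ProxyConfig may differ.
type ProxyConfig struct {
	Services map[string]Service `json:"services"`
}

type Service struct {
	FQDN         string       `json:"fqdn"`
	V4ServiceIPs []netip.Addr `json:"v4ServiceIPs"`
	Ingress      *Ingress     `json:"ingress"`
}

type Ingress struct {
	V4Backends []netip.Addr `json:"v4Backends"`
}

func main() {
	raw := []byte(`{
		"services": {
			"default-my-svc": {
				"fqdn": "my-svc.foo.tailbd97a.ts.net.",
				"v4ServiceIPs": ["100.64.2.5"],
				"ingress": {"v4Backends": ["10.40.1.12"]}
			}
		}
	}`)
	var cfg ProxyConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	for name, svc := range cfg.Services {
		// One service IP DNAT-ed to one backend, as updateServices expects.
		fmt.Printf("%s: %s -> %s\n", name, svc.V4ServiceIPs[0], svc.Ingress.V4Backends[0])
	}
}
```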
@@ -1075,6 +1174,7 @@ type settings struct {
 	// node FQDN.
 	TailnetTargetFQDN  string
 	ServeConfigPath    string
+	ServicesConfigPath string
 	DaemonExtraArgs    string
 	ExtraArgs          string
 	InKubernetes       bool
@@ -13,28 +13,30 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"math/rand"
 	"net"
+	"net/netip"
 	"os"
-	"os/signal"
+	"path"
 	"path/filepath"
+	"strings"
 	"sync"
-	"syscall"
+	"time"
 
 	"github.com/fsnotify/fsnotify"
-	"github.com/miekg/dns"
+	"golang.org/x/net/dns/dnsmessage"
+	"k8s.io/utils/pointer"
+	"tailscale.com/ipn/store/kubestore"
 	operatorutils "tailscale.com/k8s-operator"
+	"tailscale.com/tsnet"
+	"tailscale.com/types/nettype"
 	"tailscale.com/util/dnsname"
 )
 
 const (
-	// tsNetDomain is the domain that this DNS nameserver has registered a handler for.
-	tsNetDomain = "ts.net"
 	// addr is the the address that the UDP and TCP listeners will listen on.
-	addr = ":1053"
+	addr = ":53"
 
-	// The following constants are specific to the nameserver configuration
-	// provided by a mounted Kubernetes Configmap. The Configmap mounted at
-	// /config is the only supported way for configuring this nameserver.
 	defaultDNSConfigDir    = "/config"
 	kubeletMountedConfigLn = "..data"
 )
@@ -55,122 +57,187 @@ type nameserver struct {
 	// configuration has changed and the nameserver should update the
 	// in-memory records.
 	configWatcher <-chan string
+	proxies       []string
 
 	mu sync.Mutex // protects following
-	// ip4 are the in-memory hostname -> IP4 mappings that the nameserver
-	// uses to respond to A record queries.
-	ip4 map[dnsname.FQDN][]net.IP
+	serviceIPs map[dnsname.FQDN][]netip.Addr
 }
 
 func main() {
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
-	// Ensure that we watch the kube Configmap mounted at /config for
-	// nameserver configuration updates and send events when updates happen.
-	c := ensureWatcherForKubeConfigMap(ctx)
+	// state always in 'dnsrecords' Secret
+	kubeStateStore, err := kubestore.New(log.Printf, *pointer.StringPtr("nameserver-state"))
+	if err != nil {
+		log.Fatalf("error starting kube state store: %v", err)
+	}
+	ts := tsnet.Server{
+		Logf:     log.Printf,
+		Hostname: "dns-server",
+		Dir:      "/tmp",
+		Store:    kubeStateStore,
+	}
+	if _, err := ts.Up(ctx); err != nil {
+		log.Fatalf("ts.Up: %v", err)
+	}
+	defer ts.Close()
+
+	// hardcoded for this prototype
+	proxies := []string{"proxies-0", "proxies-1", "proxies-2", "proxies-3"}
+	c := ensureWatcherForServiceConfigMaps(ctx, proxies)
 
 	ns := &nameserver{
 		configReader:  configMapConfigReader,
 		configWatcher: c,
+		proxies:       proxies,
 	}
 
-	// Ensure that in-memory records get set up to date now and will get
-	// reset when the configuration changes.
-	ns.runRecordsReconciler(ctx)
+	ns.runServiceRecordsReconciler(ctx)
 
-	// Register a DNS server handle for ts.net domain names. Not having a
-	// handle registered for any other domain names is how we enforce that
-	// this nameserver can only be used for ts.net domains - querying any
-	// other domain names returns Rcode Refused.
-	dns.HandleFunc(tsNetDomain, ns.handleFunc())
+	var wg sync.WaitGroup
 
-	// Listen for DNS queries over UDP and TCP.
-	udpSig := make(chan os.Signal)
-	tcpSig := make(chan os.Signal)
-	go listenAndServe("udp", addr, udpSig)
-	go listenAndServe("tcp", addr, tcpSig)
-	sig := make(chan os.Signal, 1)
-	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
-	s := <-sig
-	log.Printf("OS signal (%s) received, shutting down", s)
-	cancel() // exit the records reconciler and configmap watcher goroutines
-	udpSig <- s // stop the UDP listener
-	tcpSig <- s // stop the TCP listener
-}
-
-// handleFunc is a DNS query handler that can respond to A record queries from
-// the nameserver's in-memory records.
-// - If an A record query is received and the
-// nameserver's in-memory records contain records for the queried domain name,
-// return a success response.
-// - If an A record query is received, but the
-// nameserver's in-memory records do not contain records for the queried domain name,
-// return NXDOMAIN.
-// - If an A record query is received, but the queried domain name is not valid, return Format Error.
-// - If a query is received for any other record type than A, return Not Implemented.
-func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) {
-	h := func(w dns.ResponseWriter, r *dns.Msg) {
-		m := new(dns.Msg)
-		defer func() {
-			w.WriteMsg(m)
-		}()
-		if len(r.Question) < 1 {
-			log.Print("[unexpected] nameserver received a request with no questions")
-			m = r.SetRcodeFormatError(r)
-			return
-		}
-		// TODO (irbekrm): maybe set message compression
-		switch r.Question[0].Qtype {
-		case dns.TypeA:
-			q := r.Question[0].Name
-			fqdn, err := dnsname.ToFQDN(q)
+	udpListener, err := ts.Listen("udp", addr)
 	if err != nil {
-		m = r.SetRcodeFormatError(r)
-		return
+		log.Fatalf("failed listening on udp port :53")
 	}
-	// The only supported use of this nameserver is as a
-	// single source of truth for MagicDNS names by
-	// non-tailnet Kubernetes workloads.
-	m.Authoritative = true
-	m.RecursionAvailable = false
+	defer udpListener.Close()
+	wg.Add(1)
+	go func() {
+		ns.serveDNS(udpListener)
+	}()
+	log.Printf("Listening for DNS on UDP %s", udpListener.Addr())
 
-	ips := n.lookupIP4(fqdn)
-	if ips == nil || len(ips) == 0 {
-		// As we are the authoritative nameserver for MagicDNS
-		// names, if we do not have a record for this MagicDNS
-		// name, it does not exist.
-		m = m.SetRcode(r, dns.RcodeNameError)
-		return
+	tcpListener, err := ts.Listen("tcp", addr)
+	if err != nil {
+		log.Fatalf("failed listening on tcp port :53")
 	}
-	// TODO (irbekrm): TTL is currently set to 0, meaning
-	// that cluster workloads will not cache the DNS
-	// records. Revisit this in future when we understand
-	// the usage patterns better- is it putting too much
-	// load on kube DNS server or is this fine?
-	for _, ip := range ips {
-		rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip}
-		m.SetRcode(r, dns.RcodeSuccess)
-		m.Answer = append(m.Answer, rr)
-	}
-	case dns.TypeAAAA:
-		// TODO (irbekrm): implement IPv6 support.
-		// Kubernetes distributions that I am most familiar with
-		// default to IPv4 for Pod CIDR ranges and often many cases don't
-		// support IPv6 at all, so this should not be crucial for now.
-		fallthrough
-	default:
-		log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String())
-		m.SetRcode(r, dns.RcodeNotImplemented)
-	}
-	}
-	return h
+	defer tcpListener.Close()
+	wg.Add(1)
+	go func() {
+		ns.serveDNS(tcpListener)
+	}()
+	log.Printf("Listening for DNS on TCP %s", tcpListener.Addr())
+	wg.Wait()
 }
 
-// runRecordsReconciler ensures that nameserver's in-memory records are
-// reset when the provided configuration changes.
-func (n *nameserver) runRecordsReconciler(ctx context.Context) {
-	log.Print("updating nameserver's records from the provided configuration...")
-	if err := n.resetRecords(); err != nil { // ensure records are up to date before the nameserver starts
+func (c *nameserver) serveDNS(ln net.Listener) {
+	for {
+		conn, err := ln.Accept()
+		if err != nil {
+			log.Printf("serveDNS accept: %v", err)
+			return
+		}
+		go c.handleServiceName(conn.(nettype.ConnPacketConn))
+	}
+}
+
+var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
+
+func (ns *nameserver) handleServiceName(conn nettype.ConnPacketConn) {
+	defer conn.Close()
+	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
+	buf := make([]byte, 1500)
+	n, err := conn.Read(buf)
+	if err != nil {
+		log.Printf("handeServiceName: read failed: %v\n ", err)
+		return
+	}
+	var msg dnsmessage.Message
+	err = msg.Unpack(buf[:n])
+	if err != nil {
+		log.Printf("handleServiceName: dnsmessage unpack failed: %v\n ", err)
+		return
+	}
+	resp, err := ns.generateDNSResponse(&msg)
+	if err != nil {
+		log.Printf("handleServiceName: DNS response generation failed: %v\n", err)
+		return
+	}
+	if len(resp) == 0 {
+		return
+	}
+	_, err = conn.Write(resp)
+	if err != nil {
+		log.Printf("handleServiceName: write failed: %v\n", err)
+	}
+}
+
+func (ns *nameserver) generateDNSResponse(req *dnsmessage.Message) ([]byte, error) {
+	b := dnsmessage.NewBuilder(nil,
+		dnsmessage.Header{
+			ID:            req.Header.ID,
+			Response:      true,
+			Authoritative: true,
+		})
+	b.EnableCompression()
+
+	if len(req.Questions) == 0 {
+		return b.Finish()
+	}
+	q := req.Questions[0]
+	if err := b.StartQuestions(); err != nil {
+		return nil, err
+	}
+	if err := b.Question(q); err != nil {
+		return nil, err
+	}
+	if err := b.StartAnswers(); err != nil {
+		return nil, err
+	}
+
+	var err error
+	switch q.Type {
+	case dnsmessage.TypeA:
+		log.Printf("query for an A record")
+		var fqdn dnsname.FQDN
+		fqdn, err = dnsname.ToFQDN(q.Name.String())
+		if err != nil {
+			log.Print("format error")
+			return nil, err
+		}
+
+		log.Print("locking service IPs")
+		ns.mu.Lock()
+		ips := ns.serviceIPs[fqdn]
+		ns.mu.Unlock()
+		log.Print("unlocking service IPs")
+
+		if ips == nil || len(ips) == 0 {
+			log.Printf("nameserver has no IPs for %s", fqdn)
+			// NXDOMAIN?
+			return nil, fmt.Errorf("no address found for %s", fqdn)
+		}
+
+		// return a random IP
+		i := rand.Intn(len(ips))
+		ip := ips[i]
+		log.Printf("produced IP address %s", ip)
+		err = b.AResource(
+			dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 5},
+			dnsmessage.AResource{A: ip.As4()},
+		)
+	case dnsmessage.TypeSOA:
+		err = b.SOAResource(
+			dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
+			dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600,
+				Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60},
+		)
+	case dnsmessage.TypeNS:
+		err = b.NSResource(
+			dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
+			dnsmessage.NSResource{NS: tsMBox},
+		)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return b.Finish()
+}
+
+func (n *nameserver) runServiceRecordsReconciler(ctx context.Context) {
+	log.Print("updating nameserver's records from the provided services configuration...")
+	if err := n.resetServiceRecords(); err != nil { // ensure records are up to date before the nameserver starts
 		log.Fatalf("error setting nameserver's records: %v", err)
 	}
 	log.Print("nameserver's records were updated")
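A package-internal sketch of how generateDNSResponse can be exercised, assuming it sits in the same package as the code above (the FQDN and IP are made-up; imports of `testing`, `net/netip`, `dnsmessage`, and `dnsname` assumed):

```go
func TestGenerateDNSResponse(t *testing.T) {
	ns := &nameserver{
		serviceIPs: map[dnsname.FQDN][]netip.Addr{
			dnsname.FQDN("my-svc.foo.tailbd97a.ts.net."): {netip.MustParseAddr("100.64.2.5")},
		},
	}
	q := dnsmessage.Message{
		Header: dnsmessage.Header{ID: 1},
		Questions: []dnsmessage.Question{{
			Name:  dnsmessage.MustNewName("my-svc.foo.tailbd97a.ts.net."),
			Type:  dnsmessage.TypeA,
			Class: dnsmessage.ClassINET,
		}},
	}
	resp, err := ns.generateDNSResponse(&q)
	if err != nil {
		t.Fatal(err)
	}
	var m dnsmessage.Message
	if err := m.Unpack(resp); err != nil {
		t.Fatal(err)
	}
	if len(m.Answers) != 1 {
		t.Fatalf("want 1 answer, got %d", len(m.Answers))
	}
}
```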
@@ -182,13 +249,7 @@ func (n *nameserver) runRecordsReconciler(ctx context.Context) {
 				return
 			case <-n.configWatcher:
 				log.Print("configuration update detected, resetting records")
-				if err := n.resetRecords(); err != nil {
-					// TODO (irbekrm): this runs in a
-					// container that will be thrown away,
-					// so this should be ok. But maybe still
-					// need to ensure that the DNS server
-					// terminates connections more
-					// gracefully.
+				if err := n.resetServiceRecords(); err != nil {
 					log.Fatalf("error resetting records: %v", err)
 				}
 				log.Print("nameserver records were reset")
@@ -197,93 +258,47 @@ func (n *nameserver) runRecordsReconciler(ctx context.Context) {
 	}()
 }
 
-// resetRecords sets the in-memory DNS records of this nameserver from the
-// provided configuration. It does not check for the diff, so the caller is
-// expected to ensure that this is only called when reset is needed.
-func (n *nameserver) resetRecords() error {
-	dnsCfgBytes, err := n.configReader()
+func (n *nameserver) resetServiceRecords() error {
+	ip4 := make(map[dnsname.FQDN][]netip.Addr)
+	for _, proxy := range n.proxies {
+		dnsCfgBytes, err := proxyConfigReader(proxy)
 		if err != nil {
-			log.Printf("error reading nameserver's configuration: %v", err)
+			log.Printf("error reading proxy config for %s configuration: %v", proxy, err)
 			return err
 		}
-	if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 {
-		log.Print("nameserver's configuration is empty, any in-memory records will be unset")
+		if dnsCfgBytes == nil || len(dnsCfgBytes) == 0 {
+			log.Printf("configuration for proxy %s is empty; do nothing", proxy)
+			continue
+		}
+		proxyCfg := &operatorutils.ProxyConfig{}
+
+		err = json.Unmarshal(dnsCfgBytes, proxyCfg)
+		if err != nil {
+			return fmt.Errorf("error unmarshalling proxy config: %v\n", err)
+		}
+		for _, svc := range proxyCfg.Services {
+			log.Printf("adding record for Service %s", svc.FQDN)
+			ip4[dnsname.FQDN(svc.FQDN)] = append(ip4[dnsname.FQDN(svc.FQDN)], svc.V4ServiceIPs...)
+		}
+	}
+	log.Printf("after update DNS records are %#+v", ip4)
 	n.mu.Lock()
-	n.ip4 = make(map[dnsname.FQDN][]net.IP)
+	n.serviceIPs = ip4
 	n.mu.Unlock()
 	return nil
-	}
-
-	dnsCfg := &operatorutils.Records{}
-	err = json.Unmarshal(dnsCfgBytes, dnsCfg)
-	if err != nil {
-		return fmt.Errorf("error unmarshalling nameserver configuration: %v\n", err)
-	}
-
-	if dnsCfg.Version != operatorutils.Alpha1Version {
-		return fmt.Errorf("unsupported configuration version %s, supported versions are %s\n", dnsCfg.Version, operatorutils.Alpha1Version)
-	}
-
-	ip4 := make(map[dnsname.FQDN][]net.IP)
-	defer func() {
-		n.mu.Lock()
-		defer n.mu.Unlock()
-		n.ip4 = ip4
-	}()
-
-	if len(dnsCfg.IP4) == 0 {
-		log.Print("nameserver's configuration contains no records, any in-memory records will be unset")
-		return nil
-	}
-
-	for fqdn, ips := range dnsCfg.IP4 {
-		fqdn, err := dnsname.ToFQDN(fqdn)
-		if err != nil {
-			log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err)
-			continue // one invalid hostname should not break the whole nameserver
-		}
-		for _, ipS := range ips {
-			ip := net.ParseIP(ipS).To4()
-			if ip == nil { // To4 returns nil if IP is not a IPv4 address
-				log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS)
-				continue // one invalid IP address should not break the whole nameserver
-			}
-			ip4[fqdn] = []net.IP{ip}
-		}
-	}
-	return nil
 }
 
-// listenAndServe starts a DNS server for the provided network and address.
-func listenAndServe(net, addr string, shutdown chan os.Signal) {
-	s := &dns.Server{Addr: addr, Net: net}
-	go func() {
-		<-shutdown
-		log.Printf("shutting down server for %s", net)
-		s.Shutdown()
-	}()
-	log.Printf("listening for %s queries on %s", net, addr)
-	if err := s.ListenAndServe(); err != nil {
-		log.Fatalf("error running %s server: %v", net, err)
-	}
-}
-
-// ensureWatcherForKubeConfigMap sets up a new file watcher for the ConfigMap
-// that's expected to be mounted at /config. Returns a channel that receives an
-// event every time the contents get updated.
-func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
+// ensureWatcherForServiceConfigMaps sets up a new file watcher for the
+// ConfigMaps containing records for Services served by the operator proxies.
+func ensureWatcherForServiceConfigMaps(ctx context.Context, proxies []string) chan string {
 	c := make(chan string)
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
-		log.Fatalf("error creating a new watcher for the mounted ConfigMap: %v", err)
+		log.Fatalf("error creating a new watcher for the services ConfigMap: %v", err)
 	}
-	// kubelet mounts configmap to a Pod using a series of symlinks, one of
-	// which is <mount-dir>/..data that Kubernetes recommends consumers to
-	// use if they need to monitor changes
-	// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
-	toWatch := filepath.Join(defaultDNSConfigDir, kubeletMountedConfigLn)
 	go func() {
 		defer watcher.Close()
-		log.Printf("starting file watch for %s", defaultDNSConfigDir)
+		log.Printf("starting file watch for %s", "/services/")
 		for {
 			select {
 			case <-ctx.Done():
@@ -293,35 +308,31 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
 				if !ok {
 					log.Fatal("watcher finished; exiting")
 				}
-				if event.Name == toWatch {
+				// kubelet mounts configmap to a Pod using a series of symlinks, one of
+				// which is <mount-dir>/..data that Kubernetes recommends consumers to
+				// use if they need to monitor changes
+				// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
+				if strings.HasSuffix(event.Name, kubeletMountedConfigLn) {
 					msg := fmt.Sprintf("ConfigMap update received: %s", event)
 					log.Print(msg)
-					c <- msg
+					n := path.Dir(event.Name)
+					base := path.Base(n)
+					c <- base // which proxy's ConfigMap should be updated
 				}
 			case err, ok := <-watcher.Errors:
 				if err != nil {
-					// TODO (irbekrm): this runs in a
-					// container that will be thrown away,
-					// so this should be ok. But maybe still
-					// need to ensure that the DNS server
-					// terminates connections more
-					// gracefully.
-					log.Fatalf("[unexpected] error watching configuration: %v", err)
+					log.Fatalf("[unexpected] error watching services configuration: %v", err)
 				}
 				if !ok {
-					// TODO (irbekrm): this runs in a
-					// container that will be thrown away,
-					// so this should be ok. But maybe still
-					// need to ensure that the DNS server
-					// terminates connections more
-					// gracefully.
 					log.Fatalf("[unexpected] errors watcher exited")
 				}
 			}
 		}
 	}()
-	if err = watcher.Add(defaultDNSConfigDir); err != nil {
-		log.Fatalf("failed setting up a watcher for the mounted ConfigMap: %v", err)
+	for _, name := range proxies {
+		if err = watcher.Add(filepath.Join("/services", name)); err != nil {
+			log.Fatalf("failed setting up a watcher for config for %s : %v", name, err)
+		}
 	}
 	return c
 }
@@ -341,14 +352,14 @@ var configMapConfigReader configReaderFunc = func() ([]byte, error) {
 	}
 }
 
-// lookupIP4 returns any IPv4 addresses for the given FQDN from nameserver's
-// in-memory records.
-func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP {
-	if n.ip4 == nil {
-		return nil
+func proxyConfigReader(proxy string) ([]byte, error) {
+	path := filepath.Join("/services", proxy, "proxyConfig")
+	if bs, err := os.ReadFile(path); err == nil {
+		return bs, err
+	} else if os.IsNotExist(err) {
+		log.Printf("path %s does not exist", path)
+		return nil, nil
+	} else {
+		return nil, fmt.Errorf("error reading %s: %w", path, err)
 	}
-	n.mu.Lock()
-	defer n.mu.Unlock()
-	f := n.ip4[fqdn]
-	return f
 }

cmd/k8s-operator/HA.md (new file, +28 lines)
@@ -0,0 +1,28 @@
+To try out:
+
+(This is the order in which I am testing this prototype. It may or may not work in a different order.)
+
+- From this branch run
+```
+helm upgrade --install operator ./cmd/k8s-operator/deploy/chart/ -n tailscale --set operatorConfig.image.repo=gcr.io/csi-test-290908/operator --set operatorConfig.image.tag=v0.0.16proxycidr --set proxyConfig.image.repo=gcr.io/csi-test-290908/proxy --set proxyConfig.image.tag=v0.0.15proxycidr --set oauth.clientId=<oauth-client-id> --set oauth.clientSecret=<oauth-client-secret> operatorConfig.logging=debug --create-namespace
+```
+
+- Run `kubectl apply -f ./cmd/k8s-operator/deploy/examples/clusterconfig.yaml`
+  (but modify the domain first so that it does not point at my tailnet).
+  This will create an STS with 4 replicas in the tailscale namespace.
+
+- Create some cluster ingress Service.
+  Each proxy should set up firewall rules to expose the service on one of the IPs it's advertising.
+
+- To test that it works so far: for one of the proxies, figure out what service IP it is advertising the
+  cluster service on (e.g. by looking at the proxies-0 ConfigMap in the tailscale namespace) and attempt
+  to access that from a client that has `--accept-routes` set to true.
+
+- Run `kubectl apply -f ./cmd/k8s-operator/deploy/examples/dnsconfig.yaml`
+  This will create a nameserver that is currently not on the tailnet.
+  You should be able to run `dig @<nameserver-cluster-ip> <dns-name-of-your-service>` and get back one of the tailnet IPs that the proxies expose this service on (see the Go probe after this list for a dig-equivalent check).
+
+Next steps:
+- expose the nameserver, maybe on an operator egress?
+
+Notes:
+- right now, machines hardcoded to 4, range hardcoded to "100.64.2.0/26", "100.64.2.64/26", "100.64.2.128/26", "100.64.2.192/26".
+  Operator creates a StatefulSet with 4 replicas for an applied ClusterConfig.
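For the dig step above, an equivalent one-shot Go probe; the nameserver cluster IP and Service name are placeholders to substitute with values from your own cluster:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	const nameserver = "10.96.0.50:53"                             // placeholder cluster IP
	name := dnsmessage.MustNewName("my-svc.foo.tailbd97a.ts.net.") // placeholder Service name

	q := dnsmessage.Message{
		Header:    dnsmessage.Header{ID: 1},
		Questions: []dnsmessage.Question{{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}},
	}
	packed, err := q.Pack()
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.Dial("udp", nameserver)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if _, err := conn.Write(packed); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 1500)
	n, err := conn.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	var resp dnsmessage.Message
	if err := resp.Unpack(buf[:n]); err != nil {
		log.Fatal(err)
	}
	for _, ans := range resp.Answers {
		if a, ok := ans.Body.(*dnsmessage.AResource); ok {
			// One of the tailnet IPs advertised for the Service.
			fmt.Println(net.IP(a.A[:]))
		}
	}
}
```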
@@ -159,11 +159,11 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 // maybeProvisionConnector ensures that any new resources required for this
 // Connector instance are deployed to the cluster.
 func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) error {
-	hostname := cn.Name + "-connector"
-	if cn.Spec.Hostname != "" {
-		hostname = string(cn.Spec.Hostname)
-	}
-	crl := childResourceLabels(cn.Name, a.tsnamespace, "connector")
+	// hostname := cn.Name + "-connector"
+	// if cn.Spec.Hostname != "" {
+	// hostname = string(cn.Spec.Hostname)
+	// }
+	// crl := childResourceLabels(cn.Name, a.tsnamespace, "connector")
 
 	proxyClass := cn.Spec.ProxyClass
 	if proxyClass != "" {
@@ -175,33 +175,33 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
 		}
 	}
 
-	sts := &tailscaleSTSConfig{
-		ParentResourceName:  cn.Name,
-		ParentResourceUID:   string(cn.UID),
-		Hostname:            hostname,
-		ChildResourceLabels: crl,
-		Tags:                cn.Spec.Tags.Stringify(),
-		Connector: &connector{
-			isExitNode: cn.Spec.ExitNode,
-		},
-		ProxyClass: proxyClass,
-	}
+	// sts := &tailscaleSTSConfig{
+	// ParentResourceName: cn.Name,
+	// ParentResourceUID: string(cn.UID),
+	// Hostname: hostname,
+	// ChildResourceLabels: crl,
+	// Tags: cn.Spec.Tags.Stringify(),
+	// Connector: &connector{
+	// isExitNode: cn.Spec.ExitNode,
+	// },
+	// ProxyClass: proxyClass,
+	// }
 
-	if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
-		sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
-	}
+	// if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
+	// sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
+	// }
 
-	a.mu.Lock()
-	if sts.Connector.isExitNode {
-		a.exitNodes.Add(cn.UID)
-	} else {
-		a.exitNodes.Remove(cn.UID)
-	}
-	if sts.Connector.routes != "" {
-		a.subnetRouters.Add(cn.GetUID())
-	} else {
-		a.subnetRouters.Remove(cn.GetUID())
-	}
+	// a.mu.Lock()
+	// if sts.Connector.isExitNode {
+	// a.exitNodes.Add(cn.UID)
+	// } else {
+	// a.exitNodes.Remove(cn.UID)
+	// }
+	// if sts.Connector.routes != "" {
+	// a.subnetRouters.Add(cn.GetUID())
+	// } else {
+	// a.subnetRouters.Remove(cn.GetUID())
+	// }
 	a.mu.Unlock()
 	gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
 	gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
@@ -210,8 +210,8 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
 	connectors.AddSlice(a.subnetRouters.Slice())
 	gaugeConnectorResources.Set(int64(connectors.Len()))
 
-	_, err := a.ssr.Provision(ctx, logger, sts)
-	return err
+	// _, err := a.ssr.Provision(ctx, logger, sts)
+	return nil
 }
 
 func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) {
@@ -27,6 +27,9 @@ rules:
 - apiGroups: ["tailscale.com"]
   resources: ["dnsconfigs", "dnsconfigs/status"]
   verbs: ["get", "list", "watch", "update"]
+- apiGroups: ["tailscale.com"]
+  resources: ["clusterconfigs", "clusterconfigs/status"]
+  verbs: ["get", "list", "watch", "update"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -56,6 +59,13 @@ rules:
 - apiGroups: ["discovery.k8s.io"]
   resources: ["endpointslices"]
   verbs: ["get", "list", "watch"]
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - '*'
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
@@ -0,0 +1,5 @@
+apiVersion: v1
+data:
+kind: ConfigMap
+metadata:
+  name: servicerecords
@@ -0,0 +1,66 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.13.0
+  name: clusterconfigs.tailscale.com
+spec:
+  group: tailscale.com
+  names:
+    kind: ClusterConfig
+    listKind: ClusterConfigList
+    plural: clusterconfigs
+    singular: clusterconfig
+  scope: Cluster
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        type: object
+        required:
+        - spec
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: 'More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
+            type: object
+            required:
+            - domain
+            properties:
+              domain:
+                description: like 'foo.tailbd97a.ts.net' for services like 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?
+                type: string
+          status:
+            description: ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.
+            type: object
+            required:
+            - proxyNodes
+            properties:
+              proxyNodes:
+                type: array
+                items:
+                  type: object
+                  required:
+                  - magicDNSName
+                  - serviceCIDR
+                  - tailnetIPs
+                  properties:
+                    magicDNSName:
+                      type: string
+                    serviceCIDR:
+                      type: string
+                    tailnetIPs:
+                      type: array
+                      items:
+                        type: string
+    served: true
+    storage: true
+    subresources:
+      status: {}
cmd/k8s-operator/deploy/examples/clusterconfig.yaml (new file, +8 lines)
@@ -0,0 +1,8 @@
+# It would be nice if users didn't need to apply this separately, but not sure
+# how to integrate this into the chart (post-render hook?)
+apiVersion: tailscale.com/v1alpha1
+kind: ClusterConfig
+metadata:
+  name: proxies
+spec:
+  domain: "foo.bar." # must have the dot at the moment
@@ -14,6 +14,5 @@ spec:
   hostname: ts-prod
   subnetRouter:
     advertiseRoutes:
-      - "10.40.0.0/14"
-      - "192.168.0.0/14"
-  exitNode: true
+      - "10.0.0.0/8"
+  exitNode: false
@@ -5,5 +5,5 @@ metadata:
 spec:
   nameserver:
     image:
-      repo: tailscale/k8s-nameserver
-      tag: unstable-v1.65
+      repo: gcr.io/csi-test-290908/nameserver
+      tag: v0.0.23proxycidr
@@ -28,6 +28,12 @@ spec:
         volumeMounts:
         - name: dnsrecords
           mountPath: /config
+        env:
+        - name: TS_AUTHKEY
+          valueFrom:
+            secretKeyRef:
+              name: nameserver-key
+              key: ts_auth_key
       restartPolicy: Always
       serviceAccount: nameserver
      serviceAccountName: nameserver

cmd/k8s-operator/deploy/manifests/nameserver/role.yaml (new file, +11 lines)
@@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: nameserver
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - '*'
@@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: nameserver
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nameserver
+subjects:
+- kind: ServiceAccount
+  name: nameserver
cmd/k8s-operator/deploy/manifests/nameserver/secret.yaml (new file, +4 lines)
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: nameserver-key
@@ -27,6 +27,12 @@ metadata:
   name: proxies
   namespace: tailscale
 ---
+apiVersion: v1
+data: null
+kind: ConfigMap
+metadata:
+  name: servicerecords
+---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
|
|||||||
- list
|
- list
|
||||||
- watch
|
- watch
|
||||||
- update
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- tailscale.com
|
||||||
|
resources:
|
||||||
|
- clusterconfigs
|
||||||
|
- clusterconfigs/status
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- update
|
||||||
---
|
---
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRoleBinding
|
kind: ClusterRoleBinding
|
||||||
@@ -1402,6 +1418,13 @@ rules:
       - get
       - list
       - watch
+  - apiGroups:
+      - rbac.authorization.k8s.io
+    resources:
+      - roles
+      - rolebindings
+    verbs:
+      - '*'
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -26,6 +26,10 @@ spec:
       env:
         - name: TS_USERSPACE
           value: "false"
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
         - name: POD_IP
           valueFrom:
             fieldRef:
@@ -9,7 +9,6 @@ import (
     "context"
     "fmt"
     "slices"
-    "strings"
     "sync"
 
     "github.com/pkg/errors"
@@ -248,32 +247,32 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
     }
 
     crl := childResourceLabels(ing.Name, ing.Namespace, "ingress")
-    var tags []string
-    if tstr, ok := ing.Annotations[AnnotationTags]; ok {
-        tags = strings.Split(tstr, ",")
-    }
-    hostname := ing.Namespace + "-" + ing.Name + "-ingress"
-    if tlsHost != "" {
-        hostname, _, _ = strings.Cut(tlsHost, ".")
-    }
+    // var tags []string
+    // if tstr, ok := ing.Annotations[AnnotationTags]; ok {
+    //     tags = strings.Split(tstr, ",")
+    // }
+    // hostname := ing.Namespace + "-" + ing.Name + "-ingress"
+    // if tlsHost != "" {
+    //     hostname, _, _ = strings.Cut(tlsHost, ".")
+    // }
 
-    sts := &tailscaleSTSConfig{
-        Hostname:            hostname,
-        ParentResourceName:  ing.Name,
-        ParentResourceUID:   string(ing.UID),
-        ServeConfig:         sc,
-        Tags:                tags,
-        ChildResourceLabels: crl,
-        ProxyClass:          proxyClass,
-    }
+    // sts := &tailscaleSTSConfig{
+    //     Hostname:            hostname,
+    //     ParentResourceName:  ing.Name,
+    //     ParentResourceUID:   string(ing.UID),
+    //     ServeConfig:         sc,
+    //     Tags:                tags,
+    //     ChildResourceLabels: crl,
+    //     ProxyClass:          proxyClass,
+    // }
 
-    if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
-        sts.ForwardClusterTrafficViaL7IngressProxy = true
-    }
+    // if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
+    //     sts.ForwardClusterTrafficViaL7IngressProxy = true
+    // }
 
-    if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
-        return fmt.Errorf("failed to provision: %w", err)
-    }
+    // if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
+    //     return fmt.Errorf("failed to provision: %w", err)
+    // }
 
     _, tsHost, _, err := a.ssr.DeviceInfo(ctx, crl)
     if err != nil {
@@ -8,6 +8,7 @@ package main
 import (
     "context"
     "fmt"
+    "log"
     "slices"
     "sync"
 
@@ -18,6 +19,7 @@ import (
     xslices "golang.org/x/exp/slices"
     appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,10 +28,12 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/reconcile"
     "sigs.k8s.io/yaml"
+    "tailscale.com/client/tailscale"
     tsoperator "tailscale.com/k8s-operator"
     tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
     "tailscale.com/tstime"
     "tailscale.com/util/clientmetric"
+    "tailscale.com/util/mak"
     "tailscale.com/util/set"
 )
 
@@ -53,10 +57,12 @@ const (
 // response to users applying DNSConfig.
 type NameserverReconciler struct {
     client.Client
+    tsClient    tsClient
     logger      *zap.SugaredLogger
     recorder    record.EventRecorder
     clock       tstime.Clock
     tsNamespace string
+    defaultTags []string
 
     mu                 sync.Mutex           // protects following
     managedNameservers set.Slice[types.UID] // one or none
|
|||||||
labels: labels,
|
labels: labels,
|
||||||
imageRepo: defaultNameserverImageRepo,
|
imageRepo: defaultNameserverImageRepo,
|
||||||
imageTag: defaultNameserverImageTag,
|
imageTag: defaultNameserverImageTag,
|
||||||
|
tsClient: a.tsClient,
|
||||||
|
tags: a.defaultTags,
|
||||||
}
|
}
|
||||||
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
|
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
|
||||||
dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo
|
dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo
|
||||||
@@ -178,7 +186,7 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
     if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" {
         dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag
     }
-    for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} {
+    for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable, secretDeployable, roleDeployable, roleBindingDeployable} {
         if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil {
             return fmt.Errorf("error reconciling %s: %w", deployable.kind, err)
         }
@@ -197,6 +205,23 @@ func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.D
     return nil
 }
 
+func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) {
+    caps := tailscale.KeyCapabilities{
+        Devices: tailscale.KeyDeviceCapabilities{
+            Create: tailscale.KeyDeviceCreateCapabilities{
+                Reusable:      false,
+                Preauthorized: true,
+                Tags:          tags,
+            },
+        },
+    }
+    key, _, err := tsClient.CreateKey(ctx, caps)
+    if err != nil {
+        return "", err
+    }
+    return key, nil
+}
+
 type deployable struct {
     kind      string
     updateObj func(context.Context, *deployConfig, client.Client) error
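Aside: a minimal sketch of how the helper above gets exercised when the nameserver Secret is first created (hedged — `cfg.tsClient` is the operator's existing Tailscale API client, and the tag list shown is illustrative, not prescribed by this commit):

    // Mint a single-use, pre-authorized, tagged auth key for the nameserver
    // device. "tag:nameserver" is a hypothetical example tag.
    key, err := newAuthKey(ctx, cfg.tsClient, []string{"tag:nameserver"})
    if err != nil {
        return fmt.Errorf("error creating new auth key: %w", err)
    }
    _ = key // written into the nameserver-key Secret by secretDeployable below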
@@ -208,6 +233,8 @@ type deployConfig struct {
     labels    map[string]string
     ownerRefs []metav1.OwnerReference
     namespace string
+    tsClient  tsClient
+    tags      []string
 }
 
 var (
@@ -219,6 +246,12 @@ var (
     saYaml []byte
     //go:embed deploy/manifests/nameserver/svc.yaml
     svcYaml []byte
+    //go:embed deploy/manifests/nameserver/secret.yaml
+    secretYaml []byte
+    //go:embed deploy/manifests/nameserver/role.yaml
+    roleYaml []byte
+    //go:embed deploy/manifests/nameserver/rolebinding.yaml
+    rolebindingYaml []byte
 
     deployDeployable = deployable{
         kind: "Deployment",
@@ -234,7 +267,33 @@ var (
             updateF := func(oldD *appsv1.Deployment) {
                 oldD.Spec = d.Spec
             }
-            _, err := createOrUpdate[appsv1.Deployment](ctx, kubeClient, cfg.namespace, d, updateF)
+            // Get all proxy ConfigMaps and mount them
+            cmList := &corev1.ConfigMapList{}
+            sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"component": "proxies"}})
+            if err != nil {
+                return fmt.Errorf("error creating label selector: %w", err)
+            }
+            if err := kubeClient.List(ctx, cmList, &client.ListOptions{LabelSelector: sel}); err != nil {
+                return fmt.Errorf("error listing ConfigMaps: %w", err)
+            }
+            for _, cm := range cmList.Items {
+                volume := corev1.Volume{
+                    Name: cm.Name,
+                    VolumeSource: corev1.VolumeSource{
+                        ConfigMap: &corev1.ConfigMapVolumeSource{
+                            LocalObjectReference: corev1.LocalObjectReference{Name: cm.Name},
+                        },
+                    },
+                }
+                volumeMount := corev1.VolumeMount{
+                    Name:      cm.Name,
+                    MountPath: fmt.Sprintf("/services/%s", cm.Name),
+                    ReadOnly:  true,
+                }
+                d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume)
+                d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
+            }
+            _, err = createOrUpdate[appsv1.Deployment](ctx, kubeClient, cfg.namespace, d, updateF)
             return err
         },
     }
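For orientation: once each proxy ConfigMap is mounted under /services/<name>, a consumer inside the nameserver Pod could enumerate the per-proxy configs roughly like this (a sketch only; the file name "proxyConfig" matches the BinaryData key written elsewhere in this commit, everything else is assumed):

    entries, err := os.ReadDir("/services")
    if err != nil {
        return err
    }
    for _, e := range entries {
        // Each mounted ConfigMap appears as a directory named after the proxy.
        b, err := os.ReadFile(filepath.Join("/services", e.Name(), "proxyConfig"))
        if err != nil {
            continue // tolerate mounts that are not yet populated
        }
        fmt.Printf("proxy %s config: %s\n", e.Name(), b)
    }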
@@ -266,6 +325,37 @@ var (
             return err
         },
     }
+    secretDeployable = deployable{
+        kind: "Secret",
+        updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
+            secret := new(corev1.Secret)
+            if err := yaml.Unmarshal(secretYaml, &secret); err != nil {
+                return fmt.Errorf("error unmarshalling Secret yaml: %w", err)
+            }
+            // TODO: make the nameserver tsnet Server actually store state in kube secret
+            secret.ObjectMeta.Labels = cfg.labels
+            secret.ObjectMeta.OwnerReferences = cfg.ownerRefs
+            secret.ObjectMeta.Namespace = cfg.namespace
+            // Get the secret
+            oldS := &corev1.Secret{
+                ObjectMeta: metav1.ObjectMeta{Name: "nameserver-key", Namespace: cfg.namespace},
+            }
+            if err := kubeClient.Get(ctx, client.ObjectKeyFromObject(oldS), oldS); apierrors.IsNotFound(err) {
+                key, err := newAuthKey(ctx, cfg.tsClient, cfg.tags)
+                if err != nil {
+                    return fmt.Errorf("error creating new auth key: %w", err)
+                }
+                // write it to the Secret
+                mak.Set(&secret.StringData, "ts_auth_key", key)
+                return kubeClient.Create(ctx, secret)
+            } else if err != nil {
+                return fmt.Errorf("error looking up 'nameserver-key' Secret: %w", err)
+            } else {
+                log.Printf("'nameserver-key' Secret exists, do nothing")
+                return nil
+            }
+        },
+    }
     cmDeployable = deployable{
         kind: "ConfigMap",
         updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
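A note on mak.Set (tailscale.com/util/mak), used above: it allocates the destination map if it is nil before assigning, which is why writing into the Secret's zero-value StringData is safe:

    var sd map[string]string
    mak.Set(&sd, "ts_auth_key", "tskey-...") // sd is allocated and populated in one step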
@@ -280,4 +370,32 @@ var (
             return err
         },
     }
+    roleDeployable = deployable{
+        kind: "Role",
+        updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
+            role := new(rbacv1.Role)
+            if err := yaml.Unmarshal(roleYaml, &role); err != nil {
+                return fmt.Errorf("error unmarshalling Role yaml: %w", err)
+            }
+            role.ObjectMeta.Labels = cfg.labels
+            role.ObjectMeta.OwnerReferences = cfg.ownerRefs
+            role.ObjectMeta.Namespace = cfg.namespace
+            _, err := createOrUpdate[rbacv1.Role](ctx, kubeClient, cfg.namespace, role, func(*rbacv1.Role) {})
+            return err
+        },
+    }
+    roleBindingDeployable = deployable{
+        kind: "RoleBinding",
+        updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
+            rb := new(rbacv1.RoleBinding)
+            if err := yaml.Unmarshal(rolebindingYaml, &rb); err != nil {
+                return fmt.Errorf("error unmarshalling RoleBinding yaml: %w", err)
+            }
+            rb.ObjectMeta.Labels = cfg.labels
+            rb.ObjectMeta.OwnerReferences = cfg.ownerRefs
+            rb.ObjectMeta.Namespace = cfg.namespace
+            _, err := createOrUpdate[rbacv1.RoleBinding](ctx, kubeClient, cfg.namespace, rb, func(*rbacv1.RoleBinding) {})
+            return err
+        },
+    }
 )
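The Role/RoleBinding deployables lean on the operator's existing generic createOrUpdate helper. Schematically it is a get-then-create-or-update over any client.Object; a sketch of the idea (not the repo's exact signature):

    func createOrUpdateSketch[T any, P interface {
        *T
        client.Object
    }](ctx context.Context, c client.Client, ns string, obj P, update func(P)) (P, error) {
        existing := P(new(T))
        err := c.Get(ctx, client.ObjectKeyFromObject(obj), existing)
        if apierrors.IsNotFound(err) {
            return obj, c.Create(ctx, obj) // not found: create fresh
        } else if err != nil {
            return nil, err
        }
        update(existing) // found: mutate in place, then persist
        return existing, c.Update(ctx, existing)
    }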
@@ -22,6 +22,7 @@ import (
     corev1 "k8s.io/api/core/v1"
     discoveryv1 "k8s.io/api/discovery/v1"
     networkingv1 "k8s.io/api/networking/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/rest"
     "sigs.k8s.io/controller-runtime/pkg/builder"
@@ -238,6 +239,8 @@ func runReconcilers(opts reconcilerOpts) {
             &appsv1.StatefulSet{}:        nsFilter,
             &appsv1.Deployment{}:         nsFilter,
             &discoveryv1.EndpointSlice{}: nsFilter,
+            &rbacv1.Role{}:               nsFilter,
+            &rbacv1.RoleBinding{}:        nsFilter,
         },
     },
     Scheme: tsapi.GlobalScheme,
@@ -282,51 +285,66 @@ func runReconcilers(opts reconcilerOpts) {
     if err != nil {
         startlog.Fatalf("could not create service reconciler: %v", err)
     }
-    ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress"))
-    // If a ProxyClassChanges, enqueue all Ingresses labeled with that
-    // ProxyClass's name.
-    proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog))
-    // Enque Ingress if a managed Service or backend Service associated with a tailscale Ingress changes.
-    svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog))
     err = builder.
         ControllerManagedBy(mgr).
-        For(&networkingv1.Ingress{}).
-        Watches(&appsv1.StatefulSet{}, ingressChildFilter).
-        Watches(&corev1.Secret{}, ingressChildFilter).
-        Watches(&corev1.Service{}, svcHandlerForIngress).
-        Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress).
-        Complete(&IngressReconciler{
-            ssr:      ssr,
-            recorder: eventRecorder,
-            Client:   mgr.GetClient(),
-            logger:   opts.log.Named("ingress-reconciler"),
+        Named("proxies-reconciler").
+        For(&tsapi.ClusterConfig{}).
+        Complete(&proxiesReconciler{
+            ssr:         ssr,
+            Client:      mgr.GetClient(),
+            logger:      opts.log.Named("proxies-reconciler"),
+            recorder:    eventRecorder,
+            tsNamespace: opts.tailscaleNamespace,
         })
     if err != nil {
-        startlog.Fatalf("could not create ingress reconciler: %v", err)
+        startlog.Fatalf("could not create proxies reconciler: %v", err)
     }
+    // ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress"))
+    // // If a ProxyClassChanges, enqueue all Ingresses labeled with that
+    // // ProxyClass's name.
+    // proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog))
+    // // Enque Ingress if a managed Service or backend Service associated with a tailscale Ingress changes.
+    // svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog))
+    // err = builder.
+    //     ControllerManagedBy(mgr).
+    //     For(&networkingv1.Ingress{}).
+    //     Watches(&appsv1.StatefulSet{}, ingressChildFilter).
+    //     Watches(&corev1.Secret{}, ingressChildFilter).
+    //     Watches(&corev1.Service{}, svcHandlerForIngress).
+    //     Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress).
+    //     Complete(&IngressReconciler{
+    //         ssr:      ssr,
+    //         recorder: eventRecorder,
+    //         Client:   mgr.GetClient(),
+    //         logger:   opts.log.Named("ingress-reconciler"),
+    //     })
+    // if err != nil {
+    //     startlog.Fatalf("could not create ingress reconciler: %v", err)
+    // }
 
-    connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
-    // If a ProxyClassChanges, enqueue all Connectors that have
-    // .spec.proxyClass set to the name of this ProxyClass.
-    proxyClassFilterForConnector := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForConnector(mgr.GetClient(), startlog))
-    err = builder.ControllerManagedBy(mgr).
-        For(&tsapi.Connector{}).
-        Watches(&appsv1.StatefulSet{}, connectorFilter).
-        Watches(&corev1.Secret{}, connectorFilter).
-        Watches(&tsapi.ProxyClass{}, proxyClassFilterForConnector).
-        Complete(&ConnectorReconciler{
-            ssr:      ssr,
-            recorder: eventRecorder,
-            Client:   mgr.GetClient(),
-            logger:   opts.log.Named("connector-reconciler"),
-            clock:    tstime.DefaultClock{},
-        })
-    if err != nil {
-        startlog.Fatalf("could not create connector reconciler: %v", err)
-    }
+    // connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
+    // // If a ProxyClassChanges, enqueue all Connectors that have
+    // // .spec.proxyClass set to the name of this ProxyClass.
+    // proxyClassFilterForConnector := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForConnector(mgr.GetClient(), startlog))
+    // err = builder.ControllerManagedBy(mgr).
+    //     For(&tsapi.Connector{}).
+    //     Watches(&appsv1.StatefulSet{}, connectorFilter).
+    //     Watches(&corev1.Secret{}, connectorFilter).
+    //     Watches(&tsapi.ProxyClass{}, proxyClassFilterForConnector).
+    //     Complete(&ConnectorReconciler{
+    //         ssr:      ssr,
+    //         recorder: eventRecorder,
+    //         Client:   mgr.GetClient(),
+    //         logger:   opts.log.Named("connector-reconciler"),
+    //         clock:    tstime.DefaultClock{},
+    //     })
+    // if err != nil {
+    //     startlog.Fatalf("could not create connector reconciler: %v", err)
+    // }
     // TODO (irbekrm): switch to metadata-only watches for resources whose
     // spec we don't need to inspect to reduce memory consumption.
     // https://github.com/kubernetes-sigs/controller-runtime/issues/1159
+    // TODO: watch for proxy config ConfigMap change events
     nameserverFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("nameserver"))
     err = builder.ControllerManagedBy(mgr).
         For(&tsapi.DNSConfig{}).
|
|||||||
Complete(&NameserverReconciler{
|
Complete(&NameserverReconciler{
|
||||||
recorder: eventRecorder,
|
recorder: eventRecorder,
|
||||||
tsNamespace: opts.tailscaleNamespace,
|
tsNamespace: opts.tailscaleNamespace,
|
||||||
|
tsClient: opts.tsClient,
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
logger: opts.log.Named("nameserver-reconciler"),
|
logger: opts.log.Named("nameserver-reconciler"),
|
||||||
clock: tstime.DefaultClock{},
|
clock: tstime.DefaultClock{},
|
||||||
|
defaultTags: strings.Split(opts.proxyTags, ","),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
startlog.Fatalf("could not create nameserver reconciler: %v", err)
|
startlog.Fatalf("could not create nameserver reconciler: %v", err)
|
||||||
}
|
}
|
||||||
err = builder.ControllerManagedBy(mgr).
|
// err = builder.ControllerManagedBy(mgr).
|
||||||
For(&tsapi.ProxyClass{}).
|
// For(&tsapi.ProxyClass{}).
|
||||||
Complete(&ProxyClassReconciler{
|
// Complete(&ProxyClassReconciler{
|
||||||
Client: mgr.GetClient(),
|
// Client: mgr.GetClient(),
|
||||||
recorder: eventRecorder,
|
// recorder: eventRecorder,
|
||||||
logger: opts.log.Named("proxyclass-reconciler"),
|
// logger: opts.log.Named("proxyclass-reconciler"),
|
||||||
clock: tstime.DefaultClock{},
|
// clock: tstime.DefaultClock{},
|
||||||
})
|
// })
|
||||||
if err != nil {
|
// if err != nil {
|
||||||
startlog.Fatal("could not create proxyclass reconciler: %v", err)
|
// startlog.Fatal("could not create proxyclass reconciler: %v", err)
|
||||||
}
|
// }
|
||||||
logger := startlog.Named("dns-records-reconciler-event-handlers")
|
// logger := startlog.Named("dns-records-reconciler-event-handlers")
|
||||||
// On EndpointSlice events, if it is an EndpointSlice for an
|
// // On EndpointSlice events, if it is an EndpointSlice for an
|
||||||
// ingress/egress proxy headless Service, reconcile the headless
|
// // ingress/egress proxy headless Service, reconcile the headless
|
||||||
// Service.
|
// // Service.
|
||||||
dnsRREpsOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerEndpointSliceHandler)
|
// dnsRREpsOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerEndpointSliceHandler)
|
||||||
// On DNSConfig changes, reconcile all headless Services for
|
// // On DNSConfig changes, reconcile all headless Services for
|
||||||
// ingress/egress proxies in operator namespace.
|
// // ingress/egress proxies in operator namespace.
|
||||||
dnsRRDNSConfigOpts := handler.EnqueueRequestsFromMapFunc(enqueueAllIngressEgressProxySvcsInNS(opts.tailscaleNamespace, mgr.GetClient(), logger))
|
// dnsRRDNSConfigOpts := handler.EnqueueRequestsFromMapFunc(enqueueAllIngressEgressProxySvcsInNS(opts.tailscaleNamespace, mgr.GetClient(), logger))
|
||||||
// On Service events, if it is an ingress/egress proxy headless Service, reconcile it.
|
// // On Service events, if it is an ingress/egress proxy headless Service, reconcile it.
|
||||||
dnsRRServiceOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerServiceHandler)
|
// dnsRRServiceOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerServiceHandler)
|
||||||
// On Ingress events, if it is a tailscale Ingress or if tailscale is the default ingress controller, reconcile the proxy
|
// // On Ingress events, if it is a tailscale Ingress or if tailscale is the default ingress controller, reconcile the proxy
|
||||||
// headless Service.
|
// // headless Service.
|
||||||
dnsRRIngressOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerIngressHandler(opts.tailscaleNamespace, opts.proxyActAsDefaultLoadBalancer, mgr.GetClient(), logger))
|
// dnsRRIngressOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerIngressHandler(opts.tailscaleNamespace, opts.proxyActAsDefaultLoadBalancer, mgr.GetClient(), logger))
|
||||||
err = builder.ControllerManagedBy(mgr).
|
// err = builder.ControllerManagedBy(mgr).
|
||||||
Named("dns-records-reconciler").
|
// Named("dns-records-reconciler").
|
||||||
Watches(&corev1.Service{}, dnsRRServiceOpts).
|
// Watches(&corev1.Service{}, dnsRRServiceOpts).
|
||||||
Watches(&networkingv1.Ingress{}, dnsRRIngressOpts).
|
// Watches(&networkingv1.Ingress{}, dnsRRIngressOpts).
|
||||||
Watches(&discoveryv1.EndpointSlice{}, dnsRREpsOpts).
|
// Watches(&discoveryv1.EndpointSlice{}, dnsRREpsOpts).
|
||||||
Watches(&tsapi.DNSConfig{}, dnsRRDNSConfigOpts).
|
// Watches(&tsapi.DNSConfig{}, dnsRRDNSConfigOpts).
|
||||||
Complete(&dnsRecordsReconciler{
|
// Complete(&dnsRecordsReconciler{
|
||||||
Client: mgr.GetClient(),
|
// Client: mgr.GetClient(),
|
||||||
tsNamespace: opts.tailscaleNamespace,
|
// tsNamespace: opts.tailscaleNamespace,
|
||||||
logger: opts.log.Named("dns-records-reconciler"),
|
// logger: opts.log.Named("dns-records-reconciler"),
|
||||||
isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
|
// isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
|
||||||
})
|
// })
|
||||||
if err != nil {
|
// if err != nil {
|
||||||
startlog.Fatalf("could not create DNS records reconciler: %v", err)
|
// startlog.Fatalf("could not create DNS records reconciler: %v", err)
|
||||||
}
|
// }
|
||||||
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
|
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
|
||||||
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
||||||
startlog.Fatalf("could not start manager: %v", err)
|
startlog.Fatalf("could not start manager: %v", err)
|
||||||
|
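One detail worth noting in the NameserverReconciler wiring above: defaultTags comes straight from strings.Split, which performs no trimming, so opts.proxyTags must be a comma-separated list without spaces. A minimal illustration:

    tags := strings.Split("tag:proxies,tag:k8s", ",")
    // tags == []string{"tag:proxies", "tag:k8s"};
    // "tag:a, tag:b" would instead yield a second element with a leading space.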
cmd/k8s-operator/proxies.go (new file, +94)
@@ -0,0 +1,94 @@
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "go.uber.org/zap"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/tools/record"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+    tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+)
+
+type proxiesReconciler struct {
+    client.Client
+    logger *zap.SugaredLogger
+
+    recorder record.EventRecorder
+    ssr      *tailscaleSTSReconciler
+
+    tsNamespace string
+}
+
+func (pr *proxiesReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
+    logger := pr.logger.With("ClusterConfig", req.Name)
+    logger.Debugf("starting reconcile")
+    defer logger.Debugf("reconcile finished")
+
+    cc := new(tsapi.ClusterConfig)
+    err = pr.Get(ctx, req.NamespacedName, cc)
+    if apierrors.IsNotFound(err) {
+        // Request object not found, could have been deleted after reconcile request.
+        logger.Debugf("ClusterConfig not found, assuming it was deleted")
+        return reconcile.Result{}, nil
+    } else if err != nil {
+        return reconcile.Result{}, fmt.Errorf("failed to get ClusterConfig: %w", err)
+    }
+    ownerRef := metav1.NewControllerRef(cc, tsapi.SchemeGroupVersion.WithKind("ClusterConfig"))
+
+    // For this prototype the number of proxy nodes is hardcoded to 4,
+    // Service CIDR range hardcoded to 100.64.2.0/24
+    // https://www.davidc.net/sites/default/subnets/subnets.html
+    cidrs := []string{"100.64.2.0/26", "100.64.2.64/26", "100.64.2.128/26", "100.64.2.192/26"}
+    stsCfg := &tailscaleSTSConfig{
+        name:                cc.Name,
+        serviceCIDRs:        cidrs,
+        clusterConfOwnerRef: ownerRef,
+    }
+    if err = pr.ssr.Provision(ctx, logger, stsCfg); err != nil {
+        return reconcile.Result{}, fmt.Errorf("error provisioning proxy: %w", err)
+    }
+    // logger.Debugf("finished reconciling index %d ", i)
+    // Now watch for Secret changes, pull out device info and update cluster config status.
+    return reconcile.Result{}, nil
+
+    // // build opts
+    // stsCfg := &tailscaleSTSConfig{
+    //     Tags:             []string{"tag:k8s"},
+    //     HostnameTemplate: class.Name,
+    //     serviceClass:     class.Name,
+    //     dnsAddr:          cidr.Addr(),
+    //     serviceCIDR:      []netip.Prefix{cidr},
+    //     numProxies:       class.NumProxies,
+    // }
+    // defaultClassCIDR = []netip.Prefix{cidr}
+
+    // // write DNS addr to the ServiceRecords ConfigMap
+    // cm := &corev1.ConfigMap{}
+    // if err := pr.Get(ctx, types.NamespacedName{Namespace: pr.tsNamespace, Name: "servicerecords"}, cm); err != nil {
+    //     return reconcile.Result{}, fmt.Errorf("error getting serviceRecords ConfigMap: %w", err)
+    // }
+
+    // var serviceRecords *kube.Records
+    // if serviceRecordsB := cm.BinaryData["serviceRecords"]; len(serviceRecordsB) == 0 {
+    //     serviceRecords = &kube.Records{Version: kube.Alpha1Version}
+    // } else {
+    //     if err := json.Unmarshal(cm.BinaryData["serviceRecords"], serviceRecords); err != nil {
+    //         return reconcile.Result{}, fmt.Errorf("error unmarshalling service records: %w", err)
+    //     }
+    // }
+    // // Remove, this will only get passed as env var to the proxies
+    // if dnsAddr := serviceRecords.DNSAddr; dnsAddr != "" {
+    //     logger.Info("DNS addr already set to %s", dnsAddr)
+    //     return reconcile.Result{}, nil
+    // }
+    // dnsAddr := defaultClassCIDR[0].Addr()
+    // serviceRecords.DNSAddr = dnsAddr.String()
+    // serviceRecordsB, err := json.Marshal(serviceRecords)
+    // cm.BinaryData["serviceRecords"] = serviceRecordsB
+
+    // return reconcile.Result{}, pr.Update(ctx, cm)
+}
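The four /26 ranges above are the even split of the hardcoded 100.64.2.0/24. A small self-contained sketch of how such a split can be computed with net/netip (a hypothetical helper, not part of this commit; IPv4-only, inputs assumed valid):

    package main

    import (
        "fmt"
        "net/netip"
    )

    // subnets splits base into equal sub-prefixes of length newLen.
    func subnets(base netip.Prefix, newLen int) []netip.Prefix {
        var out []netip.Prefix
        step := uint32(1) << (32 - newLen) // addresses per sub-prefix
        a4 := base.Addr().As4()
        start := uint32(a4[0])<<24 | uint32(a4[1])<<16 | uint32(a4[2])<<8 | uint32(a4[3])
        n := 1 << (newLen - base.Bits()) // number of sub-prefixes
        for i := 0; i < n; i++ {
            v := start + uint32(i)*step
            addr := netip.AddrFrom4([4]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
            out = append(out, netip.PrefixFrom(addr, newLen))
        }
        return out
    }

    func main() {
        fmt.Println(subnets(netip.MustParsePrefix("100.64.2.0/24"), 26))
        // [100.64.2.0/26 100.64.2.64/26 100.64.2.128/26 100.64.2.192/26]
    }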
cmd/k8s-operator/proxynodes.go (new file, +52)
@@ -0,0 +1,52 @@
+package main
+
+import (
+    "log"
+    "net/netip"
+    "os"
+    "strconv"
+)
+
+// TODO: probably remove this file
+func proxycidr() {
+    clusterDomain := os.Getenv("TS_CLUSTER_DOMAIN")
+    if clusterDomain == "" {
+        log.Fatal("TS_CLUSTER_DOMAIN must be set")
+    }
+    // TODO: check if domain already exists for a different CIDR; if so make <cluster-domain>-<n>
+
+    // Allocate /24 and set /1 to resolve DNS for this subdomain?
+    serviceCIDR := os.Getenv("TS_SERVICE_CIDR")
+    if serviceCIDR == "" {
+        log.Fatal("TS_SERVICE_CIDR must be set")
+    }
+    clusterSize := os.Getenv("TS_CLUSTER_SIZE")
+    if clusterSize == "" {
+        log.Fatal("TS_CLUSTER_SIZE must be set")
+    }
+
+    // create clusterSize proxies, each advertises /24
+    nProxies, err := strconv.Atoi(clusterSize)
+    if err != nil {
+        log.Fatalf("%s can not be converted to int: %v", clusterSize, err)
+    }
+    for range nProxies - 1 {
+    }
+}
+
+func ensureProxyExists(n int) {
+    const (
+        labelserviceClass = "tailscale.com/service-class"
+        labelProxyID      = "tailscale.com/proxy-id"
+    )
+}
+
+type service struct {
+    ip         netip.Addr
+    domainName string
+}
+
+type dnsConfig struct {
+    dnsNamesToIPs map[string][]netip.Addr
+}
@@ -13,6 +13,7 @@ import (
     "errors"
     "fmt"
     "net/http"
+    "net/netip"
     "os"
     "slices"
     "strings"
@@ -25,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apiserver/pkg/storage/names"
+    "k8s.io/utils/pointer"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/yaml"
     "tailscale.com/client/tailscale"
@@ -32,7 +34,6 @@ import (
     kubeutils "tailscale.com/k8s-operator"
     tsoperator "tailscale.com/k8s-operator"
     tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
-    "tailscale.com/net/netutil"
     "tailscale.com/tailcfg"
     "tailscale.com/types/opt"
     "tailscale.com/types/ptr"
@@ -103,29 +104,38 @@ var (
 )
 
 type tailscaleSTSConfig struct {
-    ParentResourceName  string
-    ParentResourceUID   string
-    ChildResourceLabels map[string]string
+    // serviceClass string
+    // dnsAddr      netip.Addr
+    // numProxies   int
+    name                string
+    serviceCIDRs        []string
+    clusterConfOwnerRef *metav1.OwnerReference
 
-    ServeConfig          *ipn.ServeConfig // if serve config is set, this is a proxy for Ingress
-    ClusterTargetIP      string           // ingress target IP
-    ClusterTargetDNSName string           // ingress target DNS name
-    // If set to true, operator should configure containerboot to forward
-    // cluster traffic via the proxy set up for Kubernetes Ingress.
-    ForwardClusterTrafficViaL7IngressProxy bool
-
-    TailnetTargetIP string // egress target IP
-
-    TailnetTargetFQDN string // egress target FQDN
-
-    Hostname string
     Tags       []string // if empty, use defaultTags
+    ProxyClass string
+
+    // ChildResourceLabels map[string]string
+
+    // ServeConfig          *ipn.ServeConfig // if serve config is set, this is a proxy for Ingress
+    // ClusterTargetIP      string           // ingress target IP
+    // ClusterTargetDNSName string           // ingress target DNS name
+    // // If set to true, operator should configure containerboot to forward
+    // // cluster traffic via the proxy set up for Kubernetes Ingress.
+    // ForwardClusterTrafficViaL7IngressProxy bool
+
+    // TailnetTargetIP string // egress target IP
+
+    // TailnetTargetFQDN string // egress target FQDN
+
+    // Hostname will be <hostnameTemplate>-<n> where 'n' is the numeric id
+    // of the proxy
+    // HostnameTemplate string
 
     // Connector specifies a configuration of a Connector instance if that's
     // what this StatefulSet should be created for.
-    Connector *connector
-
-    ProxyClass string
+    // Connector *connector
+    // ParentResourceName string
+    // ParentResourceUID  string
 }
 
 type connector struct {
@@ -161,26 +171,45 @@ func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool {
     return len(a.tsnetServer.CertDomains()) > 0
 }
 
-// Provision ensures that the StatefulSet for the given service is running and
-// up to date.
-func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) {
+// Provision provisions a StatefulSet with n replicas for each proxy class.
+func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) error {
     // Do full reconcile.
     // TODO (don't create Service for the Connector)
-    hsvc, err := a.reconcileHeadlessService(ctx, logger, sts)
-    if err != nil {
-        return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
-    }
+    // for i := 0; i < sts.numProxies; i++ {
+    // }
+
+    // TODO: the headless Service is needed for cluster workloads to be able
+    // to reach the egress proxies. Move the creation of this out of this
+    // code altogether and create one for each exposed tailnet service in
+    // services-reconciler.
+    // hsvc, err := a.reconcileHeadlessService(ctx, logger, sts, sts.hostnameBase)
+    // if err != nil {
+    //     return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
+    // }
+
+    // Create Secret for each proxy replica.
+    for i, cidrS := range sts.serviceCIDRs {
+        cidr, err := netip.ParsePrefix(cidrS)
+        if err != nil {
+            return fmt.Errorf("error parsing %s: %w", cidrS, err)
+        }
+        _, _, _, err = a.createOrGetSecret(ctx, logger, sts, i, cidr)
+        if err != nil {
+            return fmt.Errorf("failed to create or get API key secret: %w", err)
+        }
+        _, err = a.createOrGetCM(ctx, logger, sts, i, cidr)
+        if err != nil {
+            return fmt.Errorf("failed to create or get services ConfigMap: %w", err)
+        }
+    }
 
-    secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
-    if err != nil {
-        return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
-    }
-    _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs)
-    if err != nil {
-        return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
-    }
-
-    return hsvc, nil
+    // TODO: fix tsConfigHash
+    _, err := a.reconcileSTS(ctx, logger, sts, "fakeconfighash")
+    if err != nil {
+        return fmt.Errorf("failed to reconcile statefulset: %w", err)
+    }
+    return nil
 }
 
 // Cleanup removes all resources associated that were created by Provision with
@@ -269,34 +298,63 @@ func statefulSetNameBase(parent string) string {
     }
 }
 
-func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) {
-    nameBase := statefulSetNameBase(sts.ParentResourceName)
+func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, hostname string) (*corev1.Service, error) {
+    logger.Debugf("reconciling headless svc", "hostname", hostname)
+    nameBase := statefulSetNameBase(hostname)
     hsvc := &corev1.Service{
         ObjectMeta: metav1.ObjectMeta{
             GenerateName: nameBase,
             Namespace:    a.operatorNamespace,
-            Labels:       sts.ChildResourceLabels,
+            Labels:          map[string]string{"app": hostname},
+            OwnerReferences: []metav1.OwnerReference{*sts.clusterConfOwnerRef},
         },
         Spec: corev1.ServiceSpec{
             ClusterIP: "None",
             Selector: map[string]string{
-                "app": sts.ParentResourceUID,
+                "app": hostname,
             },
         },
     }
-    logger.Debugf("reconciling headless service for StatefulSet")
+    logger.Debugf("reconciling headless service for StatefulSet", "namebase", nameBase)
     return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec })
 }
 
+func (a *tailscaleSTSReconciler) createOrGetCM(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, i int, cidr netip.Prefix) (string, error) {
+    hostname := fmt.Sprintf("%s-%d", stsC.name, i)
+    cm := &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:            hostname,
+            Namespace:       a.operatorNamespace,
+            Labels:          map[string]string{"proxy-index": hostname, "proxies": stsC.name, "component": "proxies"},
+            OwnerReferences: []metav1.OwnerReference{*stsC.clusterConfOwnerRef},
+        },
+    }
+    if err := a.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) {
+        proxyConfig := &kubeutils.ProxyConfig{
+            ServicesCIDRRange: cidr,
+        }
+        proxyConfigBytes, err := json.Marshal(proxyConfig)
+        if err != nil {
+            return "", fmt.Errorf("error marshalling config for proxy %s: %w", hostname, err)
+        }
+        mak.Set(&cm.BinaryData, "proxyConfig", proxyConfigBytes)
+        logger.Infof("creating services ConfigMap %s", hostname)
+        return hostname, a.Create(ctx, cm)
+    } else if err != nil {
+        return "", fmt.Errorf("error getting ConfigMap %s", hostname)
+    }
+    // For this prototype, the Services CIDR written to the ConfigMap is
+    // never updated.
+    return hostname, nil
+}
+
-func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) {
+func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, i int, serviceCIDR netip.Prefix) (secretName, hash string, configs tailscaleConfigs, _ error) {
+    hostname := fmt.Sprintf("%s-%d", stsC.name, i)
     secret := &corev1.Secret{
         ObjectMeta: metav1.ObjectMeta{
-            // Hardcode a -0 suffix so that in future, if we support
-            // multiple StatefulSet replicas, we can provision -N for
-            // those.
-            Name:      hsvc.Name + "-0",
+            Name:      hostname,
             Namespace: a.operatorNamespace,
-            Labels:    stsC.ChildResourceLabels,
+            Labels:          map[string]string{"app": hostname},
+            OwnerReferences: []metav1.OwnerReference{*stsC.clusterConfOwnerRef},
         },
     }
     var orig *corev1.Secret // unmodified copy of secret
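On the consuming side, a proxy replica can recover its Services CIDR by unmarshalling the mounted proxyConfig file. A sketch, assuming kubeutils.ProxyConfig serializes its Go field name as the JSON key (not verified by this diff):

    // proxyConfig mirrors the shape marshalled in createOrGetCM above.
    type proxyConfig struct {
        ServicesCIDRRange netip.Prefix // netip.Prefix (un)marshals as text, e.g. "100.64.2.0/26"
    }

    func loadProxyConfig(path string) (netip.Prefix, error) {
        b, err := os.ReadFile(path)
        if err != nil {
            return netip.Prefix{}, err
        }
        var pc proxyConfig
        if err := json.Unmarshal(b, &pc); err != nil {
            return netip.Prefix{}, err
        }
        return pc.ServicesCIDRRange, nil
    }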
@@ -312,7 +370,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
         // Initially it contains only tailscaled config, but when the
         // proxy starts, it will also store there the state, certs and
         // ACME account key.
-        sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels)
+        sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, map[string]string{"app": hostname})
         if err != nil {
             return "", "", nil, err
         }
@@ -334,7 +392,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
             return "", "", nil, err
         }
     }
-    configs, err := tailscaledConfig(stsC, authKey, orig)
+    configs, err := tailscaledConfig(stsC, authKey, orig, hostname, serviceCIDR)
     if err != nil {
         return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err)
     }
@@ -358,14 +416,6 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
         }
     }
 
-    if stsC.ServeConfig != nil {
-        j, err := json.Marshal(stsC.ServeConfig)
-        if err != nil {
-            return "", "", nil, err
-        }
-        mak.Set(&secret.StringData, "serve-config", string(j))
-    }
-
     if orig != nil {
         logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig))
         if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
@@ -445,13 +495,14 @@ var proxyYaml []byte
 //go:embed deploy/manifests/userspace-proxy.yaml
 var userspaceProxyYaml []byte
 
-func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) {
+func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, tsConfigHash string) (*appsv1.StatefulSet, error) {
     ss := new(appsv1.StatefulSet)
-    if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
-        if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
-            return nil, fmt.Errorf("failed to unmarshal userspace proxy spec: %v", err)
-        }
-    } else {
+    // if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
+    //     if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
+    //         return nil, fmt.Errorf("failed to unmarshal userspace proxy spec: %v", err)
+    //     }
+    // } else {
+
     if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
         return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
     }
@@ -462,7 +513,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
             break
         }
     }
-    }
     pod := &ss.Spec.Template
     container := &pod.Spec.Containers[0]
     proxyClass := new(tsapi.ProxyClass)
@@ -477,64 +527,82 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
     }
     container.Image = a.proxyImage
     ss.ObjectMeta = metav1.ObjectMeta{
-        Name:      headlessSvc.Name,
+        Name:      sts.name,
         Namespace: a.operatorNamespace,
+        OwnerReferences: []metav1.OwnerReference{*sts.clusterConfOwnerRef},
     }
-    for key, val := range sts.ChildResourceLabels {
-        mak.Set(&ss.ObjectMeta.Labels, key, val)
-    }
-    ss.Spec.ServiceName = headlessSvc.Name
     ss.Spec.Selector = &metav1.LabelSelector{
         MatchLabels: map[string]string{
-            "app": sts.ParentResourceUID,
+            "app": sts.name,
         },
     }
-    mak.Set(&pod.Labels, "app", sts.ParentResourceUID)
-    for key, val := range sts.ChildResourceLabels {
-        pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod
-    }
+    ss.Spec.Replicas = pointer.Int32(int32(len(sts.serviceCIDRs)))
+    mak.Set(&pod.Labels, "app", sts.name)
 
     // Generic containerboot configuration options.
     container.Env = append(container.Env,
         corev1.EnvVar{
             Name:  "TS_KUBE_SECRET",
-            Value: proxySecret,
+            ValueFrom: &corev1.EnvVarSource{
+                FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
+            },
         },
-        corev1.EnvVar{
-            // Old tailscaled config key is still used for backwards compatibility.
-            Name:  "EXPERIMENTAL_TS_CONFIGFILE_PATH",
-            Value: "/etc/tsconfig/tailscaled",
-        },
+        // No backwards compat here
+        // corev1.EnvVar{
+        //     // Old tailscaled config key is still used for backwards compatibility.
+        //     Name:  "EXPERIMENTAL_TS_CONFIGFILE_PATH",
+        //     Value: "/etc/tsconfig/tailscaled",
+        // },
         corev1.EnvVar{
             // New style is in the form of cap-<capability-version>.hujson.
             Name:  "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR",
-            Value: "/etc/tsconfig",
+            Value: "/etc/tsconfig/$(POD_NAME)",
+        },
+        corev1.EnvVar{
+            Name:  "TS_EXPERIMENTAL_SERVICES_CONFIG_PATH",
+            Value: "/etc/$(POD_NAME)",
         },
     )
-    if sts.ForwardClusterTrafficViaL7IngressProxy {
-        container.Env = append(container.Env, corev1.EnvVar{
-            Name:  "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS",
-            Value: "true",
-        })
-    }
-    // Configure containeboot to run tailscaled with a configfile read from the state Secret.
-    mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash)
 
-    configVolume := corev1.Volume{
-        Name: "tailscaledconfig",
-        VolumeSource: corev1.VolumeSource{
-            Secret: &corev1.SecretVolumeSource{
-                SecretName: proxySecret,
-            },
-        },
-    }
-    pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
-    container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
-        Name:      "tailscaledconfig",
-        ReadOnly:  true,
-        MountPath: "/etc/tsconfig",
-    })
+    // Mount all tailscaled configs, each Pod only reads the one from
+    // $(POD_NAME) Secret.
+    // There is no way how to mount a different Secret for each replica.
+    for i := range len(sts.serviceCIDRs) {
+        // Mount the individual tailscaled state for each replica
+        configVolume := corev1.Volume{
+            Name: fmt.Sprintf("tailscaledconfig-%d", i),
+            VolumeSource: corev1.VolumeSource{
+                Secret: &corev1.SecretVolumeSource{
+                    SecretName: fmt.Sprintf("%s-%d", sts.name, i),
+                },
+            },
+        }
+        pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
+        container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
+            Name:      fmt.Sprintf("tailscaledconfig-%d", i),
+            ReadOnly:  true,
+            MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", sts.name, i),
+        })
+
+        servicesConfigV := corev1.Volume{
+            Name: fmt.Sprintf("servicesconfig-%d", i),
+            VolumeSource: corev1.VolumeSource{
+                ConfigMap: &corev1.ConfigMapVolumeSource{
+                    LocalObjectReference: corev1.LocalObjectReference{Name: fmt.Sprintf("%s-%d", sts.name, i)},
+                },
+            },
+        }
+        pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, servicesConfigV)
+        container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
+            Name:      fmt.Sprintf("servicesconfig-%d", i),
+            ReadOnly:  true,
+            MountPath: fmt.Sprintf("/etc/%s-%d", sts.name, i),
+        })
+    }
+
+    // Configure containerboot to run tailscaled with a configfile read from the state Secret.
+    mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash)
 
     if a.tsFirewallMode != "" {
         container.Env = append(container.Env, corev1.EnvVar{
             Name:  "TS_DEBUG_FIREWALL_MODE",
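The $(POD_NAME) values above rely on two Kubernetes behaviors: env var references in Value are expanded against previously declared variables (POD_NAME comes from the downward API entry added to proxy.yaml earlier in this commit), and StatefulSet pods are named <statefulset>-<ordinal>. That is what makes the per-replica Secret, ConfigMap, and mount paths line up; a small sketch of the naming (Go 1.22 range-over-int, matching this codebase):

    stsName := "proxies"
    for i := range 4 {
        podName := fmt.Sprintf("%s-%d", stsName, i) // assigned by the StatefulSet controller
        // Replica i reads only its own config:
        //   Secret <podName>    mounted at /etc/tsconfig/<podName>
        //   ConfigMap <podName> mounted at /etc/<podName>
        fmt.Println(podName)
    }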
@@ -543,51 +611,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
 	}
 	pod.Spec.PriorityClassName = a.proxyPriorityClassName

-	// Ingress/egress proxy configuration options.
-	if sts.ClusterTargetIP != "" {
-		container.Env = append(container.Env, corev1.EnvVar{
-			Name:  "TS_DEST_IP",
-			Value: sts.ClusterTargetIP,
-		})
-		mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetClusterIP, sts.ClusterTargetIP)
-	} else if sts.ClusterTargetDNSName != "" {
-		container.Env = append(container.Env, corev1.EnvVar{
-			Name:  "TS_EXPERIMENTAL_DEST_DNS_NAME",
-			Value: sts.ClusterTargetDNSName,
-		})
-		mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetClusterDNSName, sts.ClusterTargetDNSName)
-	} else if sts.TailnetTargetIP != "" {
-		container.Env = append(container.Env, corev1.EnvVar{
-			Name:  "TS_TAILNET_TARGET_IP",
-			Value: sts.TailnetTargetIP,
-		})
-		mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetTailnetTargetIP, sts.TailnetTargetIP)
-	} else if sts.TailnetTargetFQDN != "" {
-		container.Env = append(container.Env, corev1.EnvVar{
-			Name:  "TS_TAILNET_TARGET_FQDN",
-			Value: sts.TailnetTargetFQDN,
-		})
-		mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetTailnetTargetFQDN, sts.TailnetTargetFQDN)
-	} else if sts.ServeConfig != nil {
-		container.Env = append(container.Env, corev1.EnvVar{
-			Name:  "TS_SERVE_CONFIG",
-			Value: "/etc/tailscaled/serve-config",
-		})
-		container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
-			Name:      "serve-config",
-			ReadOnly:  true,
-			MountPath: "/etc/tailscaled",
-		})
-		pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
-			Name: "serve-config",
-			VolumeSource: corev1.VolumeSource{
-				Secret: &corev1.SecretVolumeSource{
-					SecretName: proxySecret,
-					Items:      []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
-				},
-			},
-		})
-	}
 	logger.Debugf("reconciling statefulset %s/%s", ss.GetNamespace(), ss.GetName())
 	if sts.ProxyClass != "" {
 		logger.Debugf("configuring proxy resources with ProxyClass %s", sts.ProxyClass)
||||||
@ -626,22 +649,22 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
|
|||||||
if pc == nil || ss == nil {
|
if pc == nil || ss == nil {
|
||||||
return ss
|
return ss
|
||||||
}
|
}
|
||||||
if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
|
// if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
|
||||||
if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
// if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||||
enableMetrics(ss, pc)
|
// enableMetrics(ss, pc)
|
||||||
} else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
// } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||||
// TODO (irbekrm): fix this
|
// // TODO (irbekrm): fix this
|
||||||
// For Ingress proxies that have been configured with
|
// // For Ingress proxies that have been configured with
|
||||||
// tailscale.com/experimental-forward-cluster-traffic-via-ingress
|
// // tailscale.com/experimental-forward-cluster-traffic-via-ingress
|
||||||
// annotation, all cluster traffic is forwarded to the
|
// // annotation, all cluster traffic is forwarded to the
|
||||||
// Ingress backend(s).
|
// // Ingress backend(s).
|
||||||
logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
// logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||||
} else {
|
// } else {
|
||||||
// TODO (irbekrm): fix this
|
// // TODO (irbekrm): fix this
|
||||||
// For egress proxies, currently all cluster traffic is forwarded to the tailnet target.
|
// // For egress proxies, currently all cluster traffic is forwarded to the tailnet target.
|
||||||
logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
// logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||||
}
|
// }
|
||||||
}
|
// }
|
||||||
|
|
||||||
if pc.Spec.StatefulSet == nil {
|
if pc.Spec.StatefulSet == nil {
|
||||||
return ss
|
return ss
|
||||||
@@ -742,29 +765,21 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) {
 // TODO (irbekrm): remove the legacy config once we no longer need to support
 // versions older than cap94,
 // https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies
-func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) {
+func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret, hostname string, serviceCIDR netip.Prefix) (tailscaleConfigs, error) {
 	conf := &ipn.ConfigVAlpha{
 		Version:      "alpha0",
 		AcceptDNS:    "false",
 		AcceptRoutes: "false", // AcceptRoutes defaults to true
 		Locked:       "false",
-		Hostname:     &stsC.Hostname,
-		NoStatefulFiltering: "false",
+		Hostname:        pointer.String(hostname),
+		AdvertiseRoutes: []netip.Prefix{serviceCIDR},
+
+		// TODO: either we switch stateful filtering off for all proxies, or
+		// we cannot share nodes between ingress and egress proxies.
+		// Although this is now off by default?
+		NoStatefulFiltering: "true",
 	}
-
-	// For egress proxies only, we need to ensure that stateful filtering is
-	// not in place so that traffic from cluster can be forwarded via
-	// Tailscale IPs.
-	if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" {
-		conf.NoStatefulFiltering = "true"
-	}
-	if stsC.Connector != nil {
-		routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode)
-		if err != nil {
-			return nil, fmt.Errorf("error calculating routes: %w", err)
-		}
-		conf.AdvertiseRoutes = routes
-	}
 	if newAuthkey != "" {
 		conf.AuthKey = &newAuthkey
 	} else if oldSecret != nil {
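For orientation, this is roughly what the resulting config file contains, shown via a standalone struct that mirrors only the fields set above (the real type is `ipn.ConfigVAlpha`; the hostname is hypothetical and the CIDR reuses the prototype's hardcoded 100.64.2.0/24 service range):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

// confSketch mirrors the ipn.ConfigVAlpha fields set by tailscaledConfig.
type confSketch struct {
	Version             string
	AcceptDNS           string
	AcceptRoutes        string
	Locked              string
	Hostname            string
	AdvertiseRoutes     []netip.Prefix
	NoStatefulFiltering string
	AuthKey             string `json:",omitempty"`
}

func main() {
	c := confSketch{
		Version:             "alpha0",
		AcceptDNS:           "false",
		AcceptRoutes:        "false",
		Locked:              "false",
		Hostname:            "ts-proxy-0", // hypothetical proxy hostname
		AdvertiseRoutes:     []netip.Prefix{netip.MustParsePrefix("100.64.2.0/24")},
		NoStatefulFiltering: "true",
	}
	b, _ := json.MarshalIndent(c, "", "  ")
	fmt.Println(string(b))
}
```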
@@ -7,12 +7,16 @@ package main

 import (
 	"context"
+	"encoding/binary"
+	"encoding/json"
 	"fmt"
+	"math/rand/v2"
 	"net/netip"
 	"slices"
 	"strings"
 	"sync"

+	"github.com/gaissmai/bart"
 	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -20,16 +24,19 @@ import (
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	kubeutils "tailscale.com/k8s-operator"
 	tsoperator "tailscale.com/k8s-operator"
 	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
 	"tailscale.com/net/dns/resolvconffile"
 	"tailscale.com/util/clientmetric"
+	"tailscale.com/util/mak"
 	"tailscale.com/util/set"
 )

 const (
 	resolvConfPath       = "/etc/resolv.conf"
 	defaultClusterDomain = "cluster.local"
+
+	serviceDNSNameAnnotation = "tailscale.com/service-dns-name"
 )

 type ServiceReconciler struct {
@@ -90,13 +97,6 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
 	} else if err != nil {
 		return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err)
 	}
-	targetIP := tailnetTargetAnnotation(svc)
-	targetFQDN := svc.Annotations[AnnotationTailnetTargetFQDN]
-	if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) && targetIP == "" && targetFQDN == "" {
-		logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up")
-		return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc)
-	}
-
 	return reconcile.Result{}, a.maybeProvision(ctx, logger, svc)
 }

@@ -150,145 +150,85 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
 // This function adds a finalizer to svc, ensuring that we can handle orderly
 // deprovisioning later.
 func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
-	// Run for proxy config related validations here as opposed to running
-	// them earlier. This is to prevent cleanup being blocked on a
-	// misconfigured proxy param.
-	if err := a.ssr.validate(); err != nil {
-		msg := fmt.Sprintf("unable to provision proxy resources: invalid config: %v", err)
-		a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDCONFIG", msg)
-		a.logger.Error(msg)
-		return nil
-	}
-	if violations := validateService(svc); len(violations) > 0 {
-		msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", "))
-		a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVCICE", msg)
-		a.logger.Error(msg)
+	// Take a look at the Service:
+	// if it is an ingress Service (expose annotation or load balancer),
+	// add a record to the config map.
+
+	// This prototype only looks at ingress Services.
+	if !a.shouldExpose(svc) {
 		return nil
 	}

-	proxyClass := proxyClassForObject(svc)
-	if proxyClass != "" {
-		if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil {
-			return fmt.Errorf("error verifying ProxyClass for Service: %w", err)
-		} else if !ready {
-			logger.Infof("ProxyClass %s specified for the Service, but is not (yet) Ready, waiting..", proxyClass)
+	// Get the ClusterConfig.
+	// Exactly one ClusterConfig needs to exist, else we don't proceed.
+	ccl := &tsapi.ClusterConfigList{}
+	if err := a.List(ctx, ccl); err != nil {
+		return fmt.Errorf("error listing ClusterConfigs: %w", err)
+	}
+	if len(ccl.Items) != 1 {
+		logger.Infof("got %d ClusterConfigs, want exactly one", len(ccl.Items))
 		return nil
 	}
-	}
-
-	hostname, err := nameForService(svc)
-	if err != nil {
-		return err
-	}
-
-	if !slices.Contains(svc.Finalizers, FinalizerName) {
-		// This log line is printed exactly once during initial provisioning,
-		// because once the finalizer is in place this block gets skipped. So,
-		// this is a nice place to tell the operator that the high level,
-		// multi-reconcile operation is underway.
-		logger.Infof("exposing service over tailscale")
-		svc.Finalizers = append(svc.Finalizers, FinalizerName)
-		if err := a.Update(ctx, svc); err != nil {
-			return fmt.Errorf("failed to add finalizer: %w", err)
-		}
-	}
-	crl := childResourceLabels(svc.Name, svc.Namespace, "svc")
-	var tags []string
-	if tstr, ok := svc.Annotations[AnnotationTags]; ok {
-		tags = strings.Split(tstr, ",")
-	}
-
-	sts := &tailscaleSTSConfig{
-		ParentResourceName:  svc.Name,
-		ParentResourceUID:   string(svc.UID),
-		Hostname:            hostname,
-		Tags:                tags,
-		ChildResourceLabels: crl,
-		ProxyClass:          proxyClass,
-	}
-
-	a.mu.Lock()
-	if a.shouldExposeClusterIP(svc) {
-		sts.ClusterTargetIP = svc.Spec.ClusterIP
-		a.managedIngressProxies.Add(svc.UID)
-		gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
-	} else if a.shouldExposeDNSName(svc) {
-		sts.ClusterTargetDNSName = svc.Spec.ExternalName
-		a.managedIngressProxies.Add(svc.UID)
-		gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
-	} else if ip := tailnetTargetAnnotation(svc); ip != "" {
-		sts.TailnetTargetIP = ip
-		a.managedEgressProxies.Add(svc.UID)
-		gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
-	} else if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" {
-		fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]
-		if !strings.HasSuffix(fqdn, ".") {
-			fqdn = fqdn + "."
-		}
-		sts.TailnetTargetFQDN = fqdn
-		a.managedEgressProxies.Add(svc.UID)
-		gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
-	}
-	a.mu.Unlock()
-
-	var hsvc *corev1.Service
-	if hsvc, err = a.ssr.Provision(ctx, logger, sts); err != nil {
-		return fmt.Errorf("failed to provision: %w", err)
-	}
-
-	if sts.TailnetTargetIP != "" || sts.TailnetTargetFQDN != "" {
-		clusterDomain := retrieveClusterDomain(a.tsNamespace, logger)
-		headlessSvcName := hsvc.Name + "." + hsvc.Namespace + ".svc." + clusterDomain
-		if svc.Spec.ExternalName != headlessSvcName || svc.Spec.Type != corev1.ServiceTypeExternalName {
-			svc.Spec.ExternalName = headlessSvcName
-			svc.Spec.Selector = nil
-			svc.Spec.Type = corev1.ServiceTypeExternalName
-			if err := a.Update(ctx, svc); err != nil {
-				return fmt.Errorf("failed to update service: %w", err)
-			}
-		}
+	if svc.Spec.ClusterIP == "" {
+		logger.Info("[unexpected] Service has no ClusterIP")
 		return nil
 	}

-	if !isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) {
-		logger.Debugf("service is not a LoadBalancer, so not updating ingress")
-		return nil
-	}
+	cc := ccl.Items[0]
+	svcDNSName := a.fqdnsForSvc(svc, cc.Spec.Domain)
+	logger.Debugf("determined DNS name %s", svcDNSName)

-	_, tsHost, tsIPs, err := a.ssr.DeviceInfo(ctx, crl)
-	if err != nil {
-		return fmt.Errorf("failed to get device ID: %w", err)
+	// Get all ConfigMaps for all proxies.
+	cmList := &corev1.ConfigMapList{}
+	if err := a.List(ctx, cmList); err != nil {
+		return fmt.Errorf("error listing proxy ConfigMaps: %w", err)
 	}
-	if tsHost == "" {
-		logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth")
-		// No hostname yet. Wait for the proxy pod to auth.
-		svc.Status.LoadBalancer.Ingress = nil
-		if err := a.Status().Update(ctx, svc); err != nil {
-			return fmt.Errorf("failed to update service status: %w", err)
-		}
-		return nil
-	}
-
-	logger.Debugf("setting ingress to %q, %s", tsHost, strings.Join(tsIPs, ", "))
-	ingress := []corev1.LoadBalancerIngress{
-		{Hostname: tsHost},
-	}
-	clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
-	if err != nil {
-		return fmt.Errorf("failed to parse cluster IP: %w", err)
-	}
-	for _, ip := range tsIPs {
-		addr, err := netip.ParseAddr(ip)
-		if err != nil {
+	for _, cm := range cmList.Items {
+		pcB := cm.BinaryData["proxyConfig"]
+		if len(pcB) == 0 {
+			a.logger.Infof("[unexpected] ConfigMap %s does not contain proxyConfig", cm.Name)
 			continue
 		}
-		if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family
-			ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip})
+		pc := &kubeutils.ProxyConfig{}
+		if err := json.Unmarshal(pcB, pc); err != nil {
+			return fmt.Errorf("error unmarshalling proxyconfig for proxy %s: %w", cm.Name, err)
 		}
+		// Does it have the service name already?
+		if _, ok := pc.Services[svcDNSName]; ok {
+			logger.Debugf("service %s already configured for proxy %s; do nothing", svcDNSName, cm.Name)
+			// TODO: check if the record is correct
+			continue
 		}
-	}
-	svc.Status.LoadBalancer.Ingress = ingress
-	if err := a.Status().Update(ctx, svc); err != nil {
-		return fmt.Errorf("failed to update service status: %w", err)
-	}
+		// Pick an IP.
+		ip := unusedIPv4(pc.ServicesCIDRRange, pc.AddrsToDomain)
+		if pc.AddrsToDomain == nil {
+			pc.AddrsToDomain = &bart.Table[string]{}
+		}
+		pc.AddrsToDomain.Insert(netip.PrefixFrom(ip, ip.BitLen()), svcDNSName)
+		clusterIP, err := netip.ParseAddr(svc.Spec.ClusterIP)
+		if err != nil {
+			return fmt.Errorf("error parsing Service Cluster IP %v: %w", svc.Spec.ClusterIP, err)
+		}
+		svcConfig := kubeutils.Service{
+			V4ServiceIPs: []netip.Addr{ip},
+			FQDN:         svcDNSName,
+			Ingress: &kubeutils.Ingress{
+				Type:       "tcp", // currently unused
+				V4Backends: []netip.Addr{clusterIP},
+			},
+		}
+		logger.Infof("assigning Service IP %v to %s", ip, svcDNSName)
+		mak.Set(&pc.Services, svcDNSName, svcConfig)
+		pcB, err = json.Marshal(pc)
+		if err != nil {
+			return fmt.Errorf("error marshalling ConfigMap for proxy %s: %w", cm.Name, err)
+		}
+		mak.Set(&cm.BinaryData, "proxyConfig", pcB)
+		if err := a.Update(ctx, &cm); err != nil {
+			return fmt.Errorf("error updating ConfigMap %s: %w", cm.Name, err)
+		}
+		logger.Infof("ConfigMap %s updated with a record for %s", cm.Name, svcDNSName)
 	}
 	return nil
 }
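The bookkeeping above leans on just two `bart.Table` operations: `Insert` with a /32 prefix per assigned address, and `Get` for the reverse lookup that `isIPUsed` performs later in this diff. A tiny standalone illustration (the address and DNS name are made up):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	addrsToDomain := &bart.Table[string]{}

	// Record that this service IP now backs a DNS name, one /32 per IP.
	ip := netip.MustParseAddr("100.64.2.7")
	addrsToDomain.Insert(netip.PrefixFrom(ip, ip.BitLen()), "my-svc-default.foo.tailbd97a.ts.net")

	// Reverse lookup: which domain owns this address?
	if domain, ok := addrsToDomain.Get(ip); ok {
		fmt.Println(ip, "->", domain)
	}
}
```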
@@ -320,6 +260,12 @@ func (a *ServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool {
 	}
 	return isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc)
 }
+
+func (a *ServiceReconciler) fqdnsForSvc(svc *corev1.Service, clusterDomain string) string {
+	if annot := svc.Annotations["tailscale.com/svc-name"]; annot != "" {
+		return annot + "." + clusterDomain
+	}
+	return svc.Name + "-" + svc.Namespace + "." + clusterDomain
+}

 func isTailscaleLoadBalancerService(svc *corev1.Service, isDefaultLoadBalancer bool) bool {
 	return svc != nil &&
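A quick standalone check of the naming rule (a local mirror of the method above, since the original hangs off the reconciler; the domain reuses the docs' running example):

```go
package main

import "fmt"

// fqdnForSvc mirrors ServiceReconciler.fqdnsForSvc: an explicit
// "tailscale.com/svc-name" annotation wins, otherwise the name is derived
// from the Service's name and namespace.
func fqdnForSvc(name, namespace, annot, domain string) string {
	if annot != "" {
		return annot + "." + domain
	}
	return name + "-" + namespace + "." + domain
}

func main() {
	fmt.Println(fqdnForSvc("my-svc", "default", "", "foo.tailbd97a.ts.net"))    // my-svc-default.foo.tailbd97a.ts.net
	fmt.Println(fqdnForSvc("my-svc", "default", "web", "foo.tailbd97a.ts.net")) // web.foo.tailbd97a.ts.net
}
```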
@@ -407,3 +353,39 @@ func clusterDomainFromResolverConf(conf *resolvconffile.Config, namespace string
 	logger.Infof("Cluster domain %q extracted from resolver config", probablyClusterDomain)
 	return probablyClusterDomain
 }
+
+func unusedIPv4(serviceCIDR netip.Prefix, usedIPs *bart.Table[string]) netip.Addr {
+	ip := randV4(serviceCIDR)
+	if usedIPs == nil {
+		return ip // first IP being assigned
+	}
+	for serviceCIDR.Contains(ip) {
+		if !isIPUsed(ip, usedIPs) {
+			return ip
+		}
+		ip = ip.Next()
+	}
+	return netip.Addr{}
+}
+
+func isIPUsed(ip netip.Addr, usedIPs *bart.Table[string]) bool {
+	_, ok := usedIPs.Get(ip)
+	return ok
+}
+
+// randV4 returns a random IPv4 address within the given prefix.
+func randV4(maskedPfx netip.Prefix) netip.Addr {
+	bits := 32 - maskedPfx.Bits()
+	randBits := rand.Uint32N(1 << uint(bits))
+
+	ip4 := maskedPfx.Addr().As4()
+	pn := binary.BigEndian.Uint32(ip4[:])
+	binary.BigEndian.PutUint32(ip4[:], randBits|pn)
+	return netip.AddrFrom4(ip4)
+}
+
+// domainForIP returns the domain name assigned to the given IP address and
+// whether it was found.
+// func domainForIP(ip netip.Addr, serviceRecords ) (string, bool) {
+// 	return ps.addrToDomain.Get(ip)
+// }
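To see the allocator end to end, the same random-start-then-linear-scan idea can be driven with a plain map standing in for the bart table. This is a test-style sketch, not part of the commit; note that, like `unusedIPv4` above, the scan only walks forward from the random starting point and does not wrap back to the start of the CIDR.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/rand/v2"
	"net/netip"
)

// randV4 is copied from the diff: pick a random address within the prefix by
// OR-ing random host bits into the masked network address.
func randV4(maskedPfx netip.Prefix) netip.Addr {
	bits := 32 - maskedPfx.Bits()
	randBits := rand.Uint32N(1 << uint(bits))
	ip4 := maskedPfx.Addr().As4()
	pn := binary.BigEndian.Uint32(ip4[:])
	binary.BigEndian.PutUint32(ip4[:], randBits|pn)
	return netip.AddrFrom4(ip4)
}

func main() {
	cidr := netip.MustParsePrefix("100.64.2.0/24")
	used := map[netip.Addr]bool{}
	for i := 0; i < 5; i++ {
		// Start at a random point, then walk forward until a free address
		// is found, as unusedIPv4 does with the bart table.
		ip := randV4(cidr)
		for cidr.Contains(ip) && used[ip] {
			ip = ip.Next()
		}
		if !cidr.Contains(ip) {
			fmt.Println("ran off the end of the CIDR")
			return
		}
		used[ip] = true
		fmt.Println("assigned", ip)
	}
}
```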
go.mod
@@ -26,7 +26,7 @@ require (
 	github.com/dsnet/try v0.0.3
 	github.com/evanw/esbuild v0.19.11
 	github.com/frankban/quicktest v1.14.6
-	github.com/fxamacker/cbor/v2 v2.5.0
+	github.com/fxamacker/cbor/v2 v2.6.0
 	github.com/gaissmai/bart v0.4.1
 	github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0
 	github.com/go-logr/zapr v1.3.0
@@ -89,14 +89,14 @@ require (
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
 	golang.org/x/crypto v0.21.0
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
-	golang.org/x/mod v0.14.0
+	golang.org/x/mod v0.15.0
 	golang.org/x/net v0.23.0
 	golang.org/x/oauth2 v0.16.0
 	golang.org/x/sync v0.6.0
 	golang.org/x/sys v0.18.0
 	golang.org/x/term v0.18.0
 	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.17.0
+	golang.org/x/tools v0.18.0
 	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
 	golang.zx2c4.com/wireguard/windows v0.5.3
 	gopkg.in/square/go-jose.v2 v2.6.0
@@ -106,6 +106,7 @@ require (
 	k8s.io/apimachinery v0.29.1
 	k8s.io/apiserver v0.29.1
 	k8s.io/client-go v0.29.1
+	k8s.io/kubernetes v1.30.1
 	nhooyr.io/websocket v1.8.10
 	sigs.k8s.io/controller-runtime v0.16.2
 	sigs.k8s.io/controller-tools v0.13.0
@@ -213,7 +214,7 @@ require (
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
@@ -370,7 +371,7 @@ require (
 	k8s.io/apiextensions-apiserver v0.29.1 // indirect
 	k8s.io/component-base v0.29.1 // indirect
 	k8s.io/klog/v2 v2.120.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
 	mvdan.cc/gofumpt v0.5.0 // indirect
 	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
go.sum
@@ -290,8 +290,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
-github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA=
+github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
 github.com/gaissmai/bart v0.4.1 h1:G1t58voWkNmT47lBDawH5QhtTDsdqRIO+ftq5x4P9Ls=
@@ -396,8 +396,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@@ -694,10 +694,10 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
+github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
@@ -960,8 +960,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
@@ -1034,8 +1034,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1281,8 +1281,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
 golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1445,8 +1445,10 @@ k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw=
 k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 h1:m6dl1pkxz3HuE2mP9MUYPCCGyy6IIFlv/vTlLBDxIwA=
-k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/kubernetes v1.30.1 h1:XlqS6KslLEA5mQzLK2AJrhr4Z1m8oJfkhHiWJ5lue+I=
+k8s.io/kubernetes v1.30.1/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0=
 k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
 k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
@@ -8,6 +8,8 @@ Packages:

 Resource Types:

+- [ClusterConfig](#clusterconfig)
+
 - [Connector](#connector)

 - [DNSConfig](#dnsconfig)
@@ -17,6 +19,154 @@ Resource Types:


+## ClusterConfig
+<sup><sup>[↩ Parent](#tailscalecomv1alpha1 )</sup></sup>
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+            <td><b>apiVersion</b></td>
+            <td>string</td>
+            <td>tailscale.com/v1alpha1</td>
+            <td>true</td>
+        </tr>
+        <tr>
+            <td><b>kind</b></td>
+            <td>string</td>
+            <td>ClusterConfig</td>
+            <td>true</td>
+        </tr>
+        <tr>
+            <td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta">metadata</a></b></td>
+            <td>object</td>
+            <td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
+            <td>true</td>
+        </tr><tr>
+            <td><b><a href="#clusterconfigspec">spec</a></b></td>
+            <td>object</td>
+            <td>
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status<br/>
+            </td>
+            <td>true</td>
+        </tr><tr>
+            <td><b><a href="#clusterconfigstatus">status</a></b></td>
+            <td>object</td>
+            <td>
+                ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.<br/>
+            </td>
+            <td>false</td>
+        </tr></tbody>
+</table>
+
+
+### ClusterConfig.spec
+<sup><sup>[↩ Parent](#clusterconfig)</sup></sup>
+
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+            <td><b>domain</b></td>
+            <td>string</td>
+            <td>
+                like 'foo.tailbd97a.ts.net' for services like 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?<br/>
+            </td>
+            <td>true</td>
+        </tr></tbody>
+</table>
+
+
+### ClusterConfig.status
+<sup><sup>[↩ Parent](#clusterconfig)</sup></sup>
+
+ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+            <td><b><a href="#clusterconfigstatusproxynodesindex">proxyNodes</a></b></td>
+            <td>[]object</td>
+            <td>
+                <br/>
+            </td>
+            <td>true</td>
+        </tr></tbody>
+</table>
+
+
+### ClusterConfig.status.proxyNodes[index]
+<sup><sup>[↩ Parent](#clusterconfigstatus)</sup></sup>
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+            <td><b>magicDNSName</b></td>
+            <td>string</td>
+            <td>
+                <br/>
+            </td>
+            <td>true</td>
+        </tr><tr>
+            <td><b>serviceCIDR</b></td>
+            <td>string</td>
+            <td>
+                <br/>
+            </td>
+            <td>true</td>
+        </tr><tr>
+            <td><b>tailnetIPs</b></td>
+            <td>[]string</td>
+            <td>
+                <br/>
+            </td>
+            <td>true</td>
+        </tr></tbody>
+</table>
+
 ## Connector
 <sup><sup>[↩ Parent](#tailscalecomv1alpha1 )</sup></sup>

@@ -49,7 +49,7 @@ func init() {

 // Adds the list of known types to api.Scheme.
 func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{})
+	scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{}, &ClusterConfig{}, &ClusterConfigList{})

 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
k8s-operator/apis/v1alpha1/types_clusterconfig.go (new file)
@@ -0,0 +1,67 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+var ClusterConfigKind = "ClusterConfig"
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster
+
+type ClusterConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// More info:
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ClusterConfigSpec `json:"spec"`
+
+	// ClusterConfigStatus describes the status of the ClusterConfig. This
+	// is set and managed by the Tailscale operator.
+	// +optional
+	Status ClusterConfigStatus `json:"status"`
+}
+
+type ClusterConfigSpec struct {
+	// like 'foo.tailbd97a.ts.net' for services like
+	// 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?
+	Domain string `json:"domain"`
+
+	// TODO: number of proxies + CIDR should be under a class; different
+	// classes should allow for different numbers of nodes.
+
+	// Hardcoded to 4 for this prototype.
+	// NumProxies int `json:"numProxies"`
+
+	// Hardcoded to 100.64.2.0/24 for this prototype.
+	// Question: is there a better way for users to allocate an unused CIDR
+	// than forcing IPs for all other nodes to a different CIDR via
+	// https://tailscale.com/kb/1304/ip-pool?
+	// CIDRv4 string `json:"cidrv4"`
+
+	// TODO: CIDRv6
+}
+
+type ClusterConfigStatus struct {
+	ProxyNodes []ProxyNode `json:"proxyNodes"`
+}
+
+type ProxyNode struct {
+	MagicDNSName string   `json:"magicDNSName"`
+	TailnetIPs   []string `json:"tailnetIPs"`
+	ServiceCIDR  string   `json:"serviceCIDR"`
+}
+
+// +kubebuilder:object:root=true
+
+type ClusterConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterConfig `json:"items"`
+}
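For reference, a minimal ClusterConfig as a user of this prototype might construct it with the types above (the domain value is the running example from the field comment, not a real tailnet):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

func main() {
	cc := &tsapi.ClusterConfig{
		TypeMeta:   metav1.TypeMeta{APIVersion: "tailscale.com/v1alpha1", Kind: "ClusterConfig"},
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: tsapi.ClusterConfigSpec{
			// Services get names like <svc>-<ns>.foo.tailbd97a.ts.net.
			Domain: "foo.tailbd97a.ts.net",
		},
	}
	fmt.Printf("%s %s: domain %q\n", cc.APIVersion, cc.Kind, cc.Spec.Domain)
}
```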
@@ -12,6 +12,102 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig.
+func (in *ClusterConfig) DeepCopy() *ClusterConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterConfig) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfigList) DeepCopyInto(out *ClusterConfigList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigList.
+func (in *ClusterConfigList) DeepCopy() *ClusterConfigList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterConfigList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfigSpec) DeepCopyInto(out *ClusterConfigSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigSpec.
+func (in *ClusterConfigSpec) DeepCopy() *ClusterConfigSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfigStatus) DeepCopyInto(out *ClusterConfigStatus) {
+	*out = *in
+	if in.ProxyNodes != nil {
+		in, out := &in.ProxyNodes, &out.ProxyNodes
+		*out = make([]ProxyNode, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigStatus.
+func (in *ClusterConfigStatus) DeepCopy() *ClusterConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Connector) DeepCopyInto(out *Connector) {
 	*out = *in
@@ -523,6 +619,26 @@ func (in *ProxyClassStatus) DeepCopy() *ProxyClassStatus {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyNode) DeepCopyInto(out *ProxyNode) {
+	*out = *in
+	if in.TailnetIPs != nil {
+		in, out := &in.TailnetIPs, &out.TailnetIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyNode.
+func (in *ProxyNode) DeepCopy() *ProxyNode {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyNode)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in Routes) DeepCopyInto(out *Routes) {
 	{
@@ -7,10 +7,14 @@ package kube

 import (
 	"fmt"
+	"net/netip"
+
+	"github.com/gaissmai/bart"
 	"tailscale.com/tailcfg"
 )

+// TODO: move all this to ./kube
+
 const (
 	Alpha1Version = "v1alpha1"

@@ -24,8 +28,17 @@ type Records struct {
 	// k8s-nameserver must verify that it knows how to parse a given
 	// version.
 	Version string `json:"version"`
+
+	// This will go: it will only contain ingress/egress destinations, not what
+	// service IPs this is assigned to.
+
 	// IP4 contains a mapping of DNS names to IPv4 address(es).
 	IP4 map[string][]string `json:"ip4"`
+	// TODO: probably don't need this here
+	AddrsToDomain *bart.Table[string] `json:"addrsToDomain"`
+	// Probably should not be a string, so that we don't need to parse twice.
+	// TODO: remove from here
+	DNSAddr string `json:"dnsAddr"`
 }

 // TailscaledConfigFileNameForCap returns a tailscaled config file name in
@@ -47,3 +60,25 @@ func CapVerFromFileName(name string) (tailcfg.CapabilityVersion, error) {
 	_, err := fmt.Sscanf(name, "cap-%d.hujson", &cap)
 	return cap, err
 }
+
+type ProxyConfig struct {
+	// Maybe we don't need to put this one here; it's just convenient for
+	// the services reconciler to read it from here.
+	ServicesCIDRRange netip.Prefix       `json:"serviceCIDR,omitempty"`
+	Services          map[string]Service `json:"services,omitempty"`
+
+	// For lookup convenience.
+	AddrsToDomain *bart.Table[string] `json:"addrsToDomain,omitempty"`
+}
+
+type Service struct {
+	FQDN         string       `json:"fqdn,omitempty"`
+	V4ServiceIPs []netip.Addr `json:"vService4ips"`
+	Ingress      *Ingress     `json:"ingress"`
+}
+
+type Ingress struct {
+	Type string `json:"type"` // "tcp" or "http"
+	// type?
+	V4Backends []netip.Addr `json:"v4Backends"`
+}
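Putting the pieces together, this is roughly the payload the operator stores under the ConfigMap's "proxyConfig" key for one configured service. A sketch only: the import alias follows the reconciler's `kubeutils` alias (the types' final package home is still an open TODO above), the values are illustrative, and `AddrsToDomain` is left nil here to sidestep its JSON encoding.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"

	kubeutils "tailscale.com/k8s-operator"
)

func main() {
	pc := kubeutils.ProxyConfig{
		ServicesCIDRRange: netip.MustParsePrefix("100.64.2.0/24"),
		Services: map[string]kubeutils.Service{
			"my-svc-default.foo.tailbd97a.ts.net": {
				FQDN:         "my-svc-default.foo.tailbd97a.ts.net",
				V4ServiceIPs: []netip.Addr{netip.MustParseAddr("100.64.2.7")},
				Ingress: &kubeutils.Ingress{
					Type:       "tcp",
					V4Backends: []netip.Addr{netip.MustParseAddr("10.0.153.30")}, // the Service's ClusterIP
				},
			},
		},
	}
	b, _ := json.MarshalIndent(pc, "", "  ")
	fmt.Println(string(b))
}
```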