mirror of https://github.com/tailscale/tailscale.git (synced 2024-11-29 04:55:31 +00:00)

WIP

This commit is contained in: parent 436794cf7a, commit 5156ec6a3b
@ -61,6 +61,8 @@
// and not `tailscale up` or `tailscale set`.
// The config file contents are currently read once on container start.
// NB: This env var is currently experimental and the logic will likely change!
// - TS_EXPERIMENTAL_SERVICES_CONFIG_PATH
// Path where config for Services served by this proxy can be found.
// - EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS: if set to true
// and if this containerboot instance is an L7 ingress proxy (created by
// the Kubernetes operator), set up rules to allow proxying cluster traffic,
@ -113,11 +115,14 @@
	kubeutils "tailscale.com/k8s-operator"
	"tailscale.com/tailcfg"
	"tailscale.com/types/logger"
	"tailscale.com/types/ptr"
	"tailscale.com/util/deephash"
	"tailscale.com/util/linuxfw"
)

const (
	kubeletMountedConfigLn = "..data"
)

func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) {
	if defaultBool("TS_TEST_FAKE_NETFILTER", false) {
		return linuxfw.NewFakeIPTablesRunner(), nil
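The hunk above adds the kubeletMountedConfigLn constant and shows newNetfilterRunner short-circuiting to a fake iptables implementation when TS_TEST_FAKE_NETFILTER is set. A minimal sketch of how a test in the same package might exercise that path; the test name and the use of t.Setenv are illustrative, not part of this commit:

```go
// Illustrative test for the fake-netfilter path; assumes it lives in the same
// package as newNetfilterRunner. (imports: "log", "testing")
func TestNewNetfilterRunnerFake(t *testing.T) {
	t.Setenv("TS_TEST_FAKE_NETFILTER", "true")
	nfr, err := newNetfilterRunner(log.Printf)
	if err != nil {
		t.Fatalf("newNetfilterRunner: %v", err)
	}
	if nfr == nil {
		t.Fatal("expected a (fake) NetfilterRunner, got nil")
	}
}
```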
@ -133,6 +138,7 @@ func main() {
		Hostname:            defaultEnv("TS_HOSTNAME", ""),
		Routes:              defaultEnvStringPointer("TS_ROUTES"),
		ServeConfigPath:     defaultEnv("TS_SERVE_CONFIG", ""),
		ServicesConfigPath:  defaultEnv("TS_EXPERIMENTAL_SERVICES_CONFIG_PATH", ""),
		ProxyTargetIP:       defaultEnv("TS_DEST_IP", ""),
		ProxyTargetDNSName:  defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""),
		TailnetTargetIP:     defaultEnv("TS_TAILNET_TARGET_IP", ""),
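The settings above are filled in by defaultEnv / defaultEnvStringPointer helpers that are not part of this hunk. For readers following along, a rough stand-alone approximation of what they presumably do; the real containerboot helpers may differ in detail:

```go
// Rough stand-ins for the env helpers used above (imports: "os").
func defaultEnvSketch(name, defVal string) string {
	if v, ok := os.LookupEnv(name); ok {
		return v
	}
	return defVal
}

func defaultEnvStringPointerSketch(name string) *string {
	if v, ok := os.LookupEnv(name); ok {
		return &v
	}
	return nil
}
```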
@ -325,13 +331,13 @@ func main() {
	}

	var (
		wantProxy = cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress
		wantProxy = cfg.ServicesConfigPath != "" || cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress
		wantDeviceInfo   = cfg.InKubernetes && cfg.KubeSecret != "" && cfg.KubernetesCanPatch
		startupTasksDone = false
		currentIPs        deephash.Sum // tailscale IPs assigned to device
		currentDeviceInfo deephash.Sum // device ID and fqdn

		currentEgressIPs deephash.Sum
		// currentEgressIPs deephash.Sum

		addrs        []netip.Prefix
		backendAddrs []net.IP
@ -389,6 +395,11 @@ func main() {

	notifyChan := make(chan ipn.Notify)
	errChan := make(chan error)
	log.Printf("attempting to update service config...")
	if err := updateServices(cfg, nfr); err != nil {
		log.Printf("error updating services: %v", err)
	}
	log.Printf("ConfigMap update processed")
	go func() {
		for {
			n, err := w.Next()
@ -401,7 +412,53 @@ func main() {
		}
	}()
	var wg sync.WaitGroup

	if cfg.ServicesConfigPath != "" {
		// kubelet mounts a ConfigMap into a Pod via a series of symlinks, one of
		// which is <mount-dir>/..data; Kubernetes recommends that consumers watch
		// that path if they need to monitor changes:
		// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
		toWatch := filepath.Join(cfg.ServicesConfigPath, kubeletMountedConfigLn)
		watcher, err := fsnotify.NewWatcher()
		if err != nil {
			log.Fatalf("error creating a new watcher for the mounted ConfigMap: %v", err)
		}
		log.Printf("will be watching the services ConfigMap")
		go func() {
			defer watcher.Close()
			for {
				log.Printf("waiting for ConfigMap updates...")
				select {
				case <-ctx.Done():
					log.Print("context cancelled, exiting ConfigMap watcher")
					return
				case event, ok := <-watcher.Events:
					log.Printf("ConfigMap update received: %s", event)
					if !ok {
						log.Fatal("watcher finished; exiting")
					}
					if event.Name == toWatch {
						log.Printf("update is for an event to watch: %s", event)
						if err := updateServices(cfg, nfr); err != nil {
							log.Printf("error updating services: %v", err)
						}
						log.Printf("ConfigMap update processed")
					} else {
						log.Printf("update is not for an event to watch: %s", event)
					}
				case err, ok := <-watcher.Errors:
					if err != nil {
						log.Fatalf("[unexpected] error watching configuration: %v", err)
					}
					if !ok {
						log.Fatalf("[unexpected] errors watcher exited")
					}
				}
			}
		}()
		if err = watcher.Add(cfg.ServicesConfigPath); err != nil {
			log.Fatalf("failed setting up a watcher for the mounted ConfigMap: %v", err)
		}
	}
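The watcher above relies on the kubelet's atomic-writer layout: every ConfigMap update swaps the `..data` symlink to point at a fresh timestamped directory, so a single event on that one path signals a complete, consistent update. A small illustrative helper (not from this commit) that resolves the symlink to see which revision is currently mounted:

```go
// Illustrative only (imports: "os", "path/filepath"): resolve the ..data
// symlink of a kubelet-mounted ConfigMap to see which timestamped revision
// directory is currently active.
func currentConfigRevision(mountDir string) (string, error) {
	// e.g. returns "..2024_03_20_10_15_30.123456789"
	return os.Readlink(filepath.Join(mountDir, "..data"))
}
```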
runLoop:
|
||||
for {
|
||||
select {
|
||||
@ -423,111 +480,111 @@ func main() {
|
||||
// whereupon we'll go through initial auth again.
|
||||
log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
|
||||
}
|
||||
if n.NetMap != nil {
|
||||
addrs = n.NetMap.SelfNode.Addresses().AsSlice()
|
||||
newCurrentIPs := deephash.Hash(&addrs)
|
||||
ipsHaveChanged := newCurrentIPs != currentIPs
|
||||
// if n.NetMap != nil {
|
||||
// addrs = n.NetMap.SelfNode.Addresses().AsSlice()
|
||||
// newCurrentIPs := deephash.Hash(&addrs)
|
||||
// ipsHaveChanged := newCurrentIPs != currentIPs
|
||||
|
||||
if cfg.TailnetTargetFQDN != "" {
|
||||
var (
|
||||
egressAddrs []netip.Prefix
|
||||
newCurentEgressIPs deephash.Sum
|
||||
egressIPsHaveChanged bool
|
||||
node tailcfg.NodeView
|
||||
nodeFound bool
|
||||
)
|
||||
for _, n := range n.NetMap.Peers {
|
||||
if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) {
|
||||
node = n
|
||||
nodeFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !nodeFound {
|
||||
log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN)
|
||||
break
|
||||
}
|
||||
egressAddrs = node.Addresses().AsSlice()
|
||||
newCurentEgressIPs = deephash.Hash(&egressAddrs)
|
||||
egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs
|
||||
if egressIPsHaveChanged && len(egressAddrs) != 0 {
|
||||
for _, egressAddr := range egressAddrs {
|
||||
ea := egressAddr.Addr()
|
||||
// TODO (irbekrm): make it work for IPv6 too.
|
||||
if ea.Is6() {
|
||||
log.Println("Not installing egress forwarding rules for IPv6 as this is currently not supported")
|
||||
continue
|
||||
}
|
||||
log.Printf("Installing forwarding rules for destination %v", ea.String())
|
||||
if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
|
||||
log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
currentEgressIPs = newCurentEgressIPs
|
||||
}
|
||||
if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
|
||||
log.Printf("Installing proxy rules")
|
||||
if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
|
||||
log.Fatalf("installing ingress proxy rules: %v", err)
|
||||
}
|
||||
}
|
||||
if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
|
||||
newBackendAddrs, err := resolveDNS(ctx, cfg.ProxyTargetDNSName)
|
||||
if err != nil {
|
||||
log.Printf("[unexpected] error resolving DNS name %s: %v", cfg.ProxyTargetDNSName, err)
|
||||
resetTimer(true)
|
||||
continue
|
||||
}
|
||||
backendsHaveChanged := !(slices.EqualFunc(backendAddrs, newBackendAddrs, func(ip1 net.IP, ip2 net.IP) bool {
|
||||
return slices.ContainsFunc(newBackendAddrs, func(ip net.IP) bool { return ip.Equal(ip1) })
|
||||
}))
|
||||
if backendsHaveChanged {
|
||||
log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
|
||||
if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
|
||||
log.Fatalf("error installing ingress proxy rules: %v", err)
|
||||
}
|
||||
}
|
||||
resetTimer(false)
|
||||
backendAddrs = newBackendAddrs
|
||||
}
|
||||
if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 {
|
||||
cd := n.NetMap.DNS.CertDomains[0]
|
||||
prev := certDomain.Swap(ptr.To(cd))
|
||||
if prev == nil || *prev != cd {
|
||||
select {
|
||||
case certDomainChanged <- true:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
|
||||
log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
|
||||
if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
|
||||
log.Fatalf("installing egress proxy rules: %v", err)
|
||||
}
|
||||
}
|
||||
// If this is a L7 cluster ingress proxy (set up
|
||||
// by Kubernetes operator) and proxying of
|
||||
// cluster traffic to the ingress target is
|
||||
// enabled, set up proxy rule each time the
|
||||
// tailnet IPs of this node change (including
|
||||
// the first time they become available).
|
||||
if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
|
||||
log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
|
||||
if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
|
||||
log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
|
||||
}
|
||||
}
|
||||
currentIPs = newCurrentIPs
|
||||
// if cfg.TailnetTargetFQDN != "" {
|
||||
// var (
|
||||
// egressAddrs []netip.Prefix
|
||||
// newCurentEgressIPs deephash.Sum
|
||||
// egressIPsHaveChanged bool
|
||||
// node tailcfg.NodeView
|
||||
// nodeFound bool
|
||||
// )
|
||||
// for _, n := range n.NetMap.Peers {
|
||||
// if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) {
|
||||
// node = n
|
||||
// nodeFound = true
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// if !nodeFound {
|
||||
// log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN)
|
||||
// break
|
||||
// }
|
||||
// egressAddrs = node.Addresses().AsSlice()
|
||||
// newCurentEgressIPs = deephash.Hash(&egressAddrs)
|
||||
// egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs
|
||||
// if egressIPsHaveChanged && len(egressAddrs) != 0 {
|
||||
// for _, egressAddr := range egressAddrs {
|
||||
// ea := egressAddr.Addr()
|
||||
// // TODO (irbekrm): make it work for IPv6 too.
|
||||
// if ea.Is6() {
|
||||
// log.Println("Not installing egress forwarding rules for IPv6 as this is currently not supported")
|
||||
// continue
|
||||
// }
|
||||
// log.Printf("Installing forwarding rules for destination %v", ea.String())
|
||||
// if err := installEgressForwardingRule(ctx, ea.String(), addrs, nfr); err != nil {
|
||||
// log.Fatalf("installing egress proxy rules for destination %s: %v", ea.String(), err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// currentEgressIPs = newCurentEgressIPs
|
||||
// }
|
||||
// if cfg.ProxyTargetIP != "" && len(addrs) != 0 && ipsHaveChanged {
|
||||
// log.Printf("Installing proxy rules")
|
||||
// if err := installIngressForwardingRule(ctx, cfg.ProxyTargetIP, addrs, nfr); err != nil {
|
||||
// log.Fatalf("installing ingress proxy rules: %v", err)
|
||||
// }
|
||||
// }
|
||||
// if cfg.ProxyTargetDNSName != "" && len(addrs) != 0 && ipsHaveChanged {
|
||||
// newBackendAddrs, err := resolveDNS(ctx, cfg.ProxyTargetDNSName)
|
||||
// if err != nil {
|
||||
// log.Printf("[unexpected] error resolving DNS name %s: %v", cfg.ProxyTargetDNSName, err)
|
||||
// resetTimer(true)
|
||||
// continue
|
||||
// }
|
||||
// backendsHaveChanged := !(slices.EqualFunc(backendAddrs, newBackendAddrs, func(ip1 net.IP, ip2 net.IP) bool {
|
||||
// return slices.ContainsFunc(newBackendAddrs, func(ip net.IP) bool { return ip.Equal(ip1) })
|
||||
// }))
|
||||
// if backendsHaveChanged {
|
||||
// log.Printf("installing ingress proxy rules for backends %v", newBackendAddrs)
|
||||
// if err := installIngressForwardingRuleForDNSTarget(ctx, newBackendAddrs, addrs, nfr); err != nil {
|
||||
// log.Fatalf("error installing ingress proxy rules: %v", err)
|
||||
// }
|
||||
// }
|
||||
// resetTimer(false)
|
||||
// backendAddrs = newBackendAddrs
|
||||
// }
|
||||
// if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 {
|
||||
// cd := n.NetMap.DNS.CertDomains[0]
|
||||
// prev := certDomain.Swap(ptr.To(cd))
|
||||
// if prev == nil || *prev != cd {
|
||||
// select {
|
||||
// case certDomainChanged <- true:
|
||||
// default:
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// if cfg.TailnetTargetIP != "" && ipsHaveChanged && len(addrs) != 0 {
|
||||
// log.Printf("Installing forwarding rules for destination %v", cfg.TailnetTargetIP)
|
||||
// if err := installEgressForwardingRule(ctx, cfg.TailnetTargetIP, addrs, nfr); err != nil {
|
||||
// log.Fatalf("installing egress proxy rules: %v", err)
|
||||
// }
|
||||
// }
|
||||
// // If this is a L7 cluster ingress proxy (set up
|
||||
// // by Kubernetes operator) and proxying of
|
||||
// // cluster traffic to the ingress target is
|
||||
// // enabled, set up proxy rule each time the
|
||||
// // tailnet IPs of this node change (including
|
||||
// // the first time they become available).
|
||||
// if cfg.AllowProxyingClusterTrafficViaIngress && cfg.ServeConfigPath != "" && ipsHaveChanged && len(addrs) != 0 {
|
||||
// log.Printf("installing rules to forward traffic for %s to node's tailnet IP", cfg.PodIP)
|
||||
// if err := installTSForwardingRuleForDestination(ctx, cfg.PodIP, addrs, nfr); err != nil {
|
||||
// log.Fatalf("installing rules to forward traffic to node's tailnet IP: %v", err)
|
||||
// }
|
||||
// }
|
||||
// currentIPs = newCurrentIPs
|
||||
|
||||
deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
|
||||
if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(¤tDeviceInfo, &deviceInfo) {
|
||||
if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
|
||||
log.Fatalf("storing device ID in kube secret: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
|
||||
// if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(¤tDeviceInfo, &deviceInfo) {
|
||||
// if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
|
||||
// log.Fatalf("storing device ID in kube secret: %v", err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
if !startupTasksDone {
|
||||
if (!wantProxy || currentIPs != deephash.Sum{}) && (!wantDeviceInfo || currentDeviceInfo != deephash.Sum{}) {
|
||||
// This log message is used in tests to detect when all
|
||||
@ -697,6 +754,48 @@ func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient
|
||||
return tsClient, cmd.Process, nil
|
||||
}
|
||||
|
||||
func updateServices(cfg *settings, nfr linuxfw.NetfilterRunner) error {
	if cfg.ServicesConfigPath == "" {
		log.Print("no services config path set")
		return nil
	}
	b, err := os.ReadFile(path.Join(cfg.ServicesConfigPath, "proxyConfig"))
	if err != nil {
		log.Printf("error reading in services config at path %s: %v", cfg.ServicesConfigPath, err)
		return nil
	}
	proxyCfg := &kubeutils.ProxyConfig{}
	if err := json.Unmarshal(b, proxyCfg); err != nil {
		log.Printf("error unmarshalling proxy config: %v", err)
		return nil
	}
	if len(proxyCfg.Services) == 0 {
		log.Printf("No Services defined")
		return nil
	}

	for name, svcFg := range proxyCfg.Services {
		log.Printf("configuring Service for %s", name)
		if svcFg.Ingress == nil || len(svcFg.Ingress.V4Backends) != 1 {
			// This prototype only supports ingress with one backend address.
			log.Printf("[unexpected] service is not ingress with a single backend address")
			return nil
		}
		if len(svcFg.V4ServiceIPs) != 1 {
			// This prototype only supports a single service IP per Service.
			log.Printf("[unexpected] a single service IP expected, got %v", svcFg.V4ServiceIPs)
			return nil
		}
		if err := nfr.AddDNATRule(svcFg.V4ServiceIPs[0], svcFg.Ingress.V4Backends[0]); err != nil {
			return fmt.Errorf("installing ingress proxy rules: %w", err)
		}
		if err := nfr.ClampMSSToPMTU("tailscale0", svcFg.V4ServiceIPs[0]); err != nil {
			return fmt.Errorf("installing ingress proxy rules: %w", err)
		}
	}
	return nil
}
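updateServices unmarshals the mounted proxyConfig file into kubeutils.ProxyConfig, whose definition lives in tailscale.com/k8s-operator and is not part of this diff. Judging purely from how the fields are used above and in the nameserver below, the shape is roughly as follows; the type names and JSON tags here are guesses, not the real definition:

```go
// Hypothetical reconstruction of the config shape consumed above; the real
// ProxyConfig lives in tailscale.com/k8s-operator and may differ.
// (imports: "net/netip")
type proxyConfigSketch struct {
	Services map[string]serviceSketch `json:"services"` // keyed by Service name
}

type serviceSketch struct {
	FQDN         string         `json:"fqdn"`         // e.g. "my-svc.foo.tailbd97a.ts.net"
	V4ServiceIPs []netip.Addr   `json:"v4ServiceIPs"` // tailnet IPs this proxy advertises for the Service
	Ingress      *ingressSketch `json:"ingress,omitempty"`
}

type ingressSketch struct {
	V4Backends []netip.Addr `json:"v4Backends"` // cluster backend IPs (exactly one in this prototype)
}
```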
|
||||
|
||||
// tailscaledArgs uses cfg to construct the argv for tailscaled.
|
||||
func tailscaledArgs(cfg *settings) []string {
|
||||
args := []string{"--socket=" + cfg.Socket}
|
||||
@ -1075,6 +1174,7 @@ type settings struct {
|
||||
// node FQDN.
|
||||
TailnetTargetFQDN string
|
||||
ServeConfigPath string
|
||||
ServicesConfigPath string
|
||||
DaemonExtraArgs string
|
||||
ExtraArgs string
|
||||
InKubernetes bool
|
||||
|
@ -13,28 +13,30 @@
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/miekg/dns"
|
||||
"golang.org/x/net/dns/dnsmessage"
|
||||
"k8s.io/utils/pointer"
|
||||
"tailscale.com/ipn/store/kubestore"
|
||||
operatorutils "tailscale.com/k8s-operator"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/types/nettype"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
|
||||
const (
	// tsNetDomain is the domain that this DNS nameserver has registered a handler for.
	tsNetDomain = "ts.net"
	// addr is the address that the UDP and TCP listeners will listen on.
	addr = ":1053"
	addr = ":53"

	// The following constants are specific to the nameserver configuration
	// provided by a mounted Kubernetes Configmap. The Configmap mounted at
	// /config is the only supported way for configuring this nameserver.
	defaultDNSConfigDir    = "/config"
	kubeletMountedConfigLn = "..data"
)
|
||||
@ -55,122 +57,187 @@ type nameserver struct {
|
||||
// configuration has changed and the nameserver should update the
|
||||
// in-memory records.
|
||||
configWatcher <-chan string
|
||||
proxies []string
|
||||
|
||||
mu sync.Mutex // protects following
|
||||
// ip4 are the in-memory hostname -> IP4 mappings that the nameserver
|
||||
// uses to respond to A record queries.
|
||||
ip4 map[dnsname.FQDN][]net.IP
|
||||
mu sync.Mutex // protects following
|
||||
serviceIPs map[dnsname.FQDN][]netip.Addr
|
||||
}
|
||||
|
||||
func main() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Ensure that we watch the kube Configmap mounted at /config for
|
||||
// nameserver configuration updates and send events when updates happen.
|
||||
c := ensureWatcherForKubeConfigMap(ctx)
|
||||
// state always in 'dnsrecords' Secret
|
||||
kubeStateStore, err := kubestore.New(log.Printf, *pointer.StringPtr("nameserver-state"))
|
||||
if err != nil {
|
||||
log.Fatalf("error starting kube state store: %v", err)
|
||||
}
|
||||
ts := tsnet.Server{
|
||||
Logf: log.Printf,
|
||||
Hostname: "dns-server",
|
||||
Dir: "/tmp",
|
||||
Store: kubeStateStore,
|
||||
}
|
||||
if _, err := ts.Up(ctx); err != nil {
|
||||
log.Fatalf("ts.Up: %v", err)
|
||||
}
|
||||
defer ts.Close()
|
||||
|
||||
// hardcoded for this prototype
|
||||
proxies := []string{"proxies-0", "proxies-1", "proxies-2", "proxies-3"}
|
||||
c := ensureWatcherForServiceConfigMaps(ctx, proxies)
|
||||
|
||||
ns := &nameserver{
|
||||
configReader: configMapConfigReader,
|
||||
configWatcher: c,
|
||||
proxies: proxies,
|
||||
}
|
||||
|
||||
// Ensure that in-memory records get set up to date now and will get
|
||||
// reset when the configuration changes.
|
||||
ns.runRecordsReconciler(ctx)
|
||||
ns.runServiceRecordsReconciler(ctx)
|
||||
|
||||
// Register a DNS server handle for ts.net domain names. Not having a
|
||||
// handle registered for any other domain names is how we enforce that
|
||||
// this nameserver can only be used for ts.net domains - querying any
|
||||
// other domain names returns Rcode Refused.
|
||||
dns.HandleFunc(tsNetDomain, ns.handleFunc())
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Listen for DNS queries over UDP and TCP.
|
||||
udpSig := make(chan os.Signal)
|
||||
tcpSig := make(chan os.Signal)
|
||||
go listenAndServe("udp", addr, udpSig)
|
||||
go listenAndServe("tcp", addr, tcpSig)
|
||||
sig := make(chan os.Signal, 1)
|
||||
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
|
||||
s := <-sig
|
||||
log.Printf("OS signal (%s) received, shutting down", s)
|
||||
cancel() // exit the records reconciler and configmap watcher goroutines
|
||||
udpSig <- s // stop the UDP listener
|
||||
tcpSig <- s // stop the TCP listener
|
||||
udpListener, err := ts.Listen("udp", addr)
|
||||
if err != nil {
|
||||
log.Fatalf("failed listening on udp port :53")
|
||||
}
|
||||
defer udpListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
ns.serveDNS(udpListener)
|
||||
}()
|
||||
log.Printf("Listening for DNS on UDP %s", udpListener.Addr())
|
||||
|
||||
tcpListener, err := ts.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
log.Fatalf("failed listening on tcp port :53")
|
||||
}
|
||||
defer tcpListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
ns.serveDNS(tcpListener)
|
||||
}()
|
||||
log.Printf("Listening for DNS on TCP %s", tcpListener.Addr())
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// handleFunc is a DNS query handler that can respond to A record queries from
|
||||
// the nameserver's in-memory records.
|
||||
// - If an A record query is received and the
|
||||
// nameserver's in-memory records contain records for the queried domain name,
|
||||
// return a success response.
|
||||
// - If an A record query is received, but the
|
||||
// nameserver's in-memory records do not contain records for the queried domain name,
|
||||
// return NXDOMAIN.
|
||||
// - If an A record query is received, but the queried domain name is not valid, return Format Error.
|
||||
// - If a query is received for any other record type than A, return Not Implemented.
|
||||
func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
h := func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
defer func() {
|
||||
w.WriteMsg(m)
|
||||
}()
|
||||
if len(r.Question) < 1 {
|
||||
log.Print("[unexpected] nameserver received a request with no questions")
|
||||
m = r.SetRcodeFormatError(r)
|
||||
func (c *nameserver) serveDNS(ln net.Listener) {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
log.Printf("serveDNS accept: %v", err)
|
||||
return
|
||||
}
|
||||
// TODO (irbekrm): maybe set message compression
|
||||
switch r.Question[0].Qtype {
|
||||
case dns.TypeA:
|
||||
q := r.Question[0].Name
|
||||
fqdn, err := dnsname.ToFQDN(q)
|
||||
if err != nil {
|
||||
m = r.SetRcodeFormatError(r)
|
||||
return
|
||||
}
|
||||
// The only supported use of this nameserver is as a
|
||||
// single source of truth for MagicDNS names by
|
||||
// non-tailnet Kubernetes workloads.
|
||||
m.Authoritative = true
|
||||
m.RecursionAvailable = false
|
||||
|
||||
ips := n.lookupIP4(fqdn)
|
||||
if len(ips) == 0 {
|
||||
// As we are the authoritative nameserver for MagicDNS
|
||||
// names, if we do not have a record for this MagicDNS
|
||||
// name, it does not exist.
|
||||
m = m.SetRcode(r, dns.RcodeNameError)
|
||||
return
|
||||
}
|
||||
// TODO (irbekrm): TTL is currently set to 0, meaning
|
||||
// that cluster workloads will not cache the DNS
|
||||
// records. Revisit this in future when we understand
|
||||
// the usage patterns better- is it putting too much
|
||||
// load on kube DNS server or is this fine?
|
||||
for _, ip := range ips {
|
||||
rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip}
|
||||
m.SetRcode(r, dns.RcodeSuccess)
|
||||
m.Answer = append(m.Answer, rr)
|
||||
}
|
||||
case dns.TypeAAAA:
|
||||
// TODO (irbekrm): implement IPv6 support.
|
||||
// Kubernetes distributions that I am most familiar with
|
||||
// default to IPv4 for Pod CIDR ranges and often many cases don't
|
||||
// support IPv6 at all, so this should not be crucial for now.
|
||||
fallthrough
|
||||
default:
|
||||
log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String())
|
||||
m.SetRcode(r, dns.RcodeNotImplemented)
|
||||
}
|
||||
go c.handleServiceName(conn.(nettype.ConnPacketConn))
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// runRecordsReconciler ensures that nameserver's in-memory records are
|
||||
// reset when the provided configuration changes.
|
||||
func (n *nameserver) runRecordsReconciler(ctx context.Context) {
|
||||
log.Print("updating nameserver's records from the provided configuration...")
|
||||
if err := n.resetRecords(); err != nil { // ensure records are up to date before the nameserver starts
|
||||
var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
|
||||
|
||||
func (ns *nameserver) handleServiceName(conn nettype.ConnPacketConn) {
	defer conn.Close()
	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
	buf := make([]byte, 1500)
	n, err := conn.Read(buf)
	if err != nil {
		log.Printf("handleServiceName: read failed: %v\n", err)
		return
	}
	var msg dnsmessage.Message
	err = msg.Unpack(buf[:n])
	if err != nil {
		log.Printf("handleServiceName: dnsmessage unpack failed: %v\n", err)
		return
	}
	resp, err := ns.generateDNSResponse(&msg)
	if err != nil {
		log.Printf("handleServiceName: DNS response generation failed: %v\n", err)
		return
	}
	if len(resp) == 0 {
		return
	}
	_, err = conn.Write(resp)
	if err != nil {
		log.Printf("handleServiceName: write failed: %v\n", err)
	}
}
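handleServiceName reads one raw DNS message per connection and hands it to generateDNSResponse. For reference, a client-side sketch (not from this commit) of building and packing the kind of A-record query it expects, using the same x/net dnsmessage package; the queried name is a placeholder:

```go
// Illustrative client-side counterpart
// (imports: "golang.org/x/net/dns/dnsmessage").
func packAQuery() ([]byte, error) {
	msg := dnsmessage.Message{
		Header: dnsmessage.Header{ID: 1},
		Questions: []dnsmessage.Question{{
			Name:  dnsmessage.MustNewName("my-svc.foo.tailbd97a.ts.net."), // placeholder name
			Type:  dnsmessage.TypeA,
			Class: dnsmessage.ClassINET,
		}},
	}
	return msg.Pack()
}
```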
|
||||
|
||||
func (ns *nameserver) generateDNSResponse(req *dnsmessage.Message) ([]byte, error) {
|
||||
b := dnsmessage.NewBuilder(nil,
|
||||
dnsmessage.Header{
|
||||
ID: req.Header.ID,
|
||||
Response: true,
|
||||
Authoritative: true,
|
||||
})
|
||||
b.EnableCompression()
|
||||
|
||||
if len(req.Questions) == 0 {
|
||||
return b.Finish()
|
||||
}
|
||||
q := req.Questions[0]
|
||||
if err := b.StartQuestions(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := b.Question(q); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := b.StartAnswers(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
switch q.Type {
|
||||
case dnsmessage.TypeA:
|
||||
log.Printf("query for an A record")
|
||||
var fqdn dnsname.FQDN
|
||||
fqdn, err = dnsname.ToFQDN(q.Name.String())
|
||||
if err != nil {
|
||||
log.Print("format error")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Print("locking service IPs")
|
||||
ns.mu.Lock()
|
||||
ips := ns.serviceIPs[fqdn]
|
||||
ns.mu.Unlock()
|
||||
log.Print("unlocking service IPs")
|
||||
|
||||
if len(ips) == 0 {
|
||||
log.Printf("nameserver has no IPs for %s", fqdn)
|
||||
// NXDOMAIN?
|
||||
return nil, fmt.Errorf("no address found for %s", fqdn)
|
||||
}
|
||||
|
||||
// return a random IP
|
||||
i := rand.Intn(len(ips))
|
||||
ip := ips[i]
|
||||
log.Printf("produced IP address %s", ip)
|
||||
err = b.AResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 5},
|
||||
dnsmessage.AResource{A: ip.As4()},
|
||||
)
|
||||
case dnsmessage.TypeSOA:
|
||||
err = b.SOAResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600,
|
||||
Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60},
|
||||
)
|
||||
case dnsmessage.TypeNS:
|
||||
err = b.NSResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.NSResource{NS: tsMBox},
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Finish()
|
||||
}
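generateDNSResponse answers an A query with a single service IP chosen at random (TTL 5), which gives crude client-side spreading across proxies. A hedged sketch of unpacking such a response and extracting the address, again with x/net dnsmessage; this is illustrative and not part of the commit:

```go
// Illustrative (imports: "fmt", "net/netip", "golang.org/x/net/dns/dnsmessage"):
// unpack a response produced by generateDNSResponse and return the first A answer.
func firstARecord(resp []byte) (netip.Addr, error) {
	var msg dnsmessage.Message
	if err := msg.Unpack(resp); err != nil {
		return netip.Addr{}, err
	}
	for _, ans := range msg.Answers {
		if a, ok := ans.Body.(*dnsmessage.AResource); ok {
			return netip.AddrFrom4(a.A), nil
		}
	}
	return netip.Addr{}, fmt.Errorf("no A record in response")
}
```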
|
||||
|
||||
func (n *nameserver) runServiceRecordsReconciler(ctx context.Context) {
|
||||
log.Print("updating nameserver's records from the provided services configuration...")
|
||||
if err := n.resetServiceRecords(); err != nil { // ensure records are up to date before the nameserver starts
|
||||
log.Fatalf("error setting nameserver's records: %v", err)
|
||||
}
|
||||
log.Print("nameserver's records were updated")
|
||||
@ -182,13 +249,7 @@ func (n *nameserver) runRecordsReconciler(ctx context.Context) {
|
||||
return
|
||||
case <-n.configWatcher:
|
||||
log.Print("configuration update detected, resetting records")
|
||||
if err := n.resetRecords(); err != nil {
|
||||
// TODO (irbekrm): this runs in a
|
||||
// container that will be thrown away,
|
||||
// so this should be ok. But maybe still
|
||||
// need to ensure that the DNS server
|
||||
// terminates connections more
|
||||
// gracefully.
|
||||
if err := n.resetServiceRecords(); err != nil {
|
||||
log.Fatalf("error resetting records: %v", err)
|
||||
}
|
||||
log.Print("nameserver records were reset")
|
||||
@ -197,93 +258,47 @@ func (n *nameserver) runRecordsReconciler(ctx context.Context) {
|
||||
}()
|
||||
}
|
||||
|
||||
// resetRecords sets the in-memory DNS records of this nameserver from the
|
||||
// provided configuration. It does not check for the diff, so the caller is
|
||||
// expected to ensure that this is only called when reset is needed.
|
||||
func (n *nameserver) resetRecords() error {
|
||||
dnsCfgBytes, err := n.configReader()
|
||||
if err != nil {
|
||||
log.Printf("error reading nameserver's configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 {
|
||||
log.Print("nameserver's configuration is empty, any in-memory records will be unset")
|
||||
n.mu.Lock()
|
||||
n.ip4 = make(map[dnsname.FQDN][]net.IP)
|
||||
n.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
dnsCfg := &operatorutils.Records{}
|
||||
err = json.Unmarshal(dnsCfgBytes, dnsCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error unmarshalling nameserver configuration: %v\n", err)
|
||||
}
|
||||
|
||||
if dnsCfg.Version != operatorutils.Alpha1Version {
|
||||
return fmt.Errorf("unsupported configuration version %s, supported versions are %s\n", dnsCfg.Version, operatorutils.Alpha1Version)
|
||||
}
|
||||
|
||||
ip4 := make(map[dnsname.FQDN][]net.IP)
|
||||
defer func() {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
n.ip4 = ip4
|
||||
}()
|
||||
|
||||
if len(dnsCfg.IP4) == 0 {
|
||||
log.Print("nameserver's configuration contains no records, any in-memory records will be unset")
|
||||
return nil
|
||||
}
|
||||
|
||||
for fqdn, ips := range dnsCfg.IP4 {
|
||||
fqdn, err := dnsname.ToFQDN(fqdn)
|
||||
func (n *nameserver) resetServiceRecords() error {
|
||||
ip4 := make(map[dnsname.FQDN][]netip.Addr)
|
||||
for _, proxy := range n.proxies {
|
||||
dnsCfgBytes, err := proxyConfigReader(proxy)
|
||||
if err != nil {
|
||||
log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err)
|
||||
continue // one invalid hostname should not break the whole nameserver
|
||||
log.Printf("error reading proxy config for %s configuration: %v", proxy, err)
|
||||
return err
|
||||
}
|
||||
for _, ipS := range ips {
|
||||
ip := net.ParseIP(ipS).To4()
|
||||
if ip == nil { // To4 returns nil if IP is not a IPv4 address
|
||||
log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS)
|
||||
continue // one invalid IP address should not break the whole nameserver
|
||||
}
|
||||
ip4[fqdn] = []net.IP{ip}
|
||||
if dnsCfgBytes == nil || len(dnsCfgBytes) == 0 {
|
||||
log.Printf("configuration for proxy %s is empty; do nothing", proxy)
|
||||
continue
|
||||
}
|
||||
proxyCfg := &operatorutils.ProxyConfig{}
|
||||
|
||||
err = json.Unmarshal(dnsCfgBytes, proxyCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error unmarshalling proxy config: %v\n", err)
|
||||
}
|
||||
for _, svc := range proxyCfg.Services {
|
||||
log.Printf("adding record for Service %s", svc.FQDN)
|
||||
ip4[dnsname.FQDN(svc.FQDN)] = append(ip4[dnsname.FQDN(svc.FQDN)], svc.V4ServiceIPs...)
|
||||
}
|
||||
}
|
||||
log.Printf("after update DNS records are %#+v", ip4)
|
||||
n.mu.Lock()
|
||||
n.serviceIPs = ip4
|
||||
n.mu.Unlock()
|
||||
return nil
|
||||
}
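resetServiceRecords merges the per-proxy ProxyConfigs into one FQDN-to-IPs map, appending an entry for each proxy that advertises the Service. For intuition, a hand-built example of what the merged map could look like for one Service exposed by two proxies; the addresses are made up:

```go
// For intuition only (imports: "net/netip", "tailscale.com/util/dnsname"):
// one Service exposed by two proxies ends up as a single FQDN with two
// candidate addresses.
func exampleMergedRecords() map[dnsname.FQDN][]netip.Addr {
	fqdn := dnsname.FQDN("my-svc.foo.tailbd97a.ts.net.")
	return map[dnsname.FQDN][]netip.Addr{
		fqdn: {
			netip.MustParseAddr("100.64.2.5"),  // advertised by proxies-0
			netip.MustParseAddr("100.64.2.70"), // advertised by proxies-1
		},
	}
}
```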
|
||||
|
||||
// listenAndServe starts a DNS server for the provided network and address.
|
||||
func listenAndServe(net, addr string, shutdown chan os.Signal) {
|
||||
s := &dns.Server{Addr: addr, Net: net}
|
||||
go func() {
|
||||
<-shutdown
|
||||
log.Printf("shutting down server for %s", net)
|
||||
s.Shutdown()
|
||||
}()
|
||||
log.Printf("listening for %s queries on %s", net, addr)
|
||||
if err := s.ListenAndServe(); err != nil {
|
||||
log.Fatalf("error running %s server: %v", net, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ensureWatcherForKubeConfigMap sets up a new file watcher for the ConfigMap
|
||||
// that's expected to be mounted at /config. Returns a channel that receives an
|
||||
// event every time the contents get updated.
|
||||
func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
|
||||
// ensureWatcherForServiceConfigMaps sets up a new file watcher for the
|
||||
// ConfigMaps containing records for Services served by the operator proxies.
|
||||
func ensureWatcherForServiceConfigMaps(ctx context.Context, proxies []string) chan string {
|
||||
c := make(chan string)
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatalf("error creating a new watcher for the mounted ConfigMap: %v", err)
|
||||
log.Fatalf("error creating a new watcher for the services ConfigMap: %v", err)
|
||||
}
|
||||
// kubelet mounts configmap to a Pod using a series of symlinks, one of
|
||||
// which is <mount-dir>/..data that Kubernetes recommends consumers to
|
||||
// use if they need to monitor changes
|
||||
// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
|
||||
toWatch := filepath.Join(defaultDNSConfigDir, kubeletMountedConfigLn)
|
||||
go func() {
|
||||
defer watcher.Close()
|
||||
log.Printf("starting file watch for %s", defaultDNSConfigDir)
|
||||
log.Printf("starting file watch for %s", "/services/")
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@ -293,35 +308,31 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
|
||||
if !ok {
|
||||
log.Fatal("watcher finished; exiting")
|
||||
}
|
||||
if event.Name == toWatch {
|
||||
// kubelet mounts configmap to a Pod using a series of symlinks, one of
|
||||
// which is <mount-dir>/..data that Kubernetes recommends consumers to
|
||||
// use if they need to monitor changes
|
||||
// https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61
|
||||
if strings.HasSuffix(event.Name, kubeletMountedConfigLn) {
|
||||
msg := fmt.Sprintf("ConfigMap update received: %s", event)
|
||||
log.Print(msg)
|
||||
c <- msg
|
||||
n := path.Dir(event.Name)
|
||||
base := path.Base(n)
|
||||
c <- base // which proxy's ConfigMap should be updated
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
if err != nil {
|
||||
// TODO (irbekrm): this runs in a
|
||||
// container that will be thrown away,
|
||||
// so this should be ok. But maybe still
|
||||
// need to ensure that the DNS server
|
||||
// terminates connections more
|
||||
// gracefully.
|
||||
log.Fatalf("[unexpected] error watching configuration: %v", err)
|
||||
log.Fatalf("[unexpected] error watching services configuration: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
// TODO (irbekrm): this runs in a
|
||||
// container that will be thrown away,
|
||||
// so this should be ok. But maybe still
|
||||
// need to ensure that the DNS server
|
||||
// terminates connections more
|
||||
// gracefully.
|
||||
log.Fatalf("[unexpected] errors watcher exited")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
if err = watcher.Add(defaultDNSConfigDir); err != nil {
|
||||
log.Fatalf("failed setting up a watcher for the mounted ConfigMap: %v", err)
|
||||
for _, name := range proxies {
|
||||
if err = watcher.Add(filepath.Join("/services", name)); err != nil {
|
||||
log.Fatalf("failed setting up a watcher for config for %s : %v", name, err)
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
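ensureWatcherForServiceConfigMaps sends the base name of the directory whose `..data` link changed, i.e. which proxy's ConfigMap was updated. An illustrative consumer of that channel (the current reconciler simply re-reads all proxies on any event):

```go
// Illustrative consumer (imports: "context", "log") of the channel returned by
// ensureWatcherForServiceConfigMaps: each value names the proxy whose
// ConfigMap changed, e.g. "proxies-0".
func consumeConfigUpdates(ctx context.Context, c <-chan string) {
	for {
		select {
		case <-ctx.Done():
			return
		case proxy := <-c:
			log.Printf("records for proxy %s need refreshing", proxy)
		}
	}
}
```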
|
||||
@ -341,14 +352,14 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
|
||||
}
|
||||
}
|
||||
|
||||
// lookupIP4 returns any IPv4 addresses for the given FQDN from nameserver's
|
||||
// in-memory records.
|
||||
func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP {
|
||||
if n.ip4 == nil {
|
||||
return nil
|
||||
func proxyConfigReader(proxy string) ([]byte, error) {
|
||||
path := filepath.Join("/services", proxy, "proxyConfig")
|
||||
if bs, err := os.ReadFile(path); err == nil {
|
||||
return bs, err
|
||||
} else if os.IsNotExist(err) {
|
||||
log.Printf("path %s does not exist", path)
|
||||
return nil, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("error reading %s: %w", path, err)
|
||||
}
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
f := n.ip4[fqdn]
|
||||
return f
|
||||
}
|
||||
|
cmd/k8s-operator/HA.md (new file)
@ -0,0 +1,28 @@
To try out:
(This is the order in which I am testing this prototype. It may or may not work in a different order.)
- from this branch run
```
helm upgrade --install operator ./cmd/k8s-operator/deploy/chart/ -n tailscale --set operatorConfig.image.repo=gcr.io/csi-test-290908/operator --set operatorConfig.image.tag=v0.0.16proxycidr --set proxyConfig.image.repo=gcr.io/csi-test-290908/proxy --set proxyConfig.image.tag=v0.0.15proxycidr --set oauth.clientId=<oauth-client-id> --set oauth.clientSecret=<oauth-client-secret> operatorConfig.logging=debug --create-namespace
```

- run `kubectl apply -f ./cmd/k8s-operator/deploy/examples/clusterconfig.yaml`
  ^ but you will want to modify the domain first so it does not point at my tailnet.
  This will create an STS with 4 replicas in the tailscale namespace.

- create some cluster ingress Service.
  Each proxy should set up firewall rules to expose the service on one of the IPs it's advertising.

- to test that it works so far: for one of the proxies, figure out which service IP it is advertising the
  cluster service on (e.g. by looking at the proxies-0 ConfigMap in the tailscale namespace) and attempt
  to access that IP from a client that has `--accept-routes` set to true.

- run `kubectl apply -f ./cmd/k8s-operator/deploy/examples/dnsconfig.yaml`
  This will create a nameserver that is currently not on the tailnet.
  You should be able to run `dig @<nameserver-cluster-ip> <dns-name-of-your-service>` and get back one of the tailnet IPs that the proxies expose this service on.

Next steps:
- expose the nameserver, maybe on an operator egress?

Notes:
- right now, the number of machines is hardcoded to 4 and the ranges are hardcoded to "100.64.2.0/26", "100.64.2.64/26", "100.64.2.128/26", "100.64.2.192/26".
  The operator creates a StatefulSet with 4 replicas for an applied ClusterConfig.
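The hardcoded ranges in the note above are just 100.64.2.0/24 carved into four equal /26 blocks of 64 addresses, one per proxy replica. The following standalone snippet (not from this commit) prints exactly those four prefixes:

```go
package main

import (
	"fmt"
	"net/netip"
)

// Prints the four /26 blocks of 100.64.2.0/24 listed in the note above:
// 100.64.2.0/26, 100.64.2.64/26, 100.64.2.128/26, 100.64.2.192/26.
func main() {
	base := netip.MustParseAddr("100.64.2.0").As4()
	for i := 0; i < 4; i++ {
		a := base
		a[3] = byte(i * 64) // each /26 holds 64 addresses
		fmt.Println(netip.PrefixFrom(netip.AddrFrom4(a), 26))
	}
}
```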
|
@ -159,11 +159,11 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
|
||||
// maybeProvisionConnector ensures that any new resources required for this
|
||||
// Connector instance are deployed to the cluster.
|
||||
func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) error {
|
||||
hostname := cn.Name + "-connector"
|
||||
if cn.Spec.Hostname != "" {
|
||||
hostname = string(cn.Spec.Hostname)
|
||||
}
|
||||
crl := childResourceLabels(cn.Name, a.tsnamespace, "connector")
|
||||
// hostname := cn.Name + "-connector"
|
||||
// if cn.Spec.Hostname != "" {
|
||||
// hostname = string(cn.Spec.Hostname)
|
||||
// }
|
||||
// crl := childResourceLabels(cn.Name, a.tsnamespace, "connector")
|
||||
|
||||
proxyClass := cn.Spec.ProxyClass
|
||||
if proxyClass != "" {
|
||||
@ -175,33 +175,33 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
|
||||
}
|
||||
}
|
||||
|
||||
sts := &tailscaleSTSConfig{
|
||||
ParentResourceName: cn.Name,
|
||||
ParentResourceUID: string(cn.UID),
|
||||
Hostname: hostname,
|
||||
ChildResourceLabels: crl,
|
||||
Tags: cn.Spec.Tags.Stringify(),
|
||||
Connector: &connector{
|
||||
isExitNode: cn.Spec.ExitNode,
|
||||
},
|
||||
ProxyClass: proxyClass,
|
||||
}
|
||||
// sts := &tailscaleSTSConfig{
|
||||
// ParentResourceName: cn.Name,
|
||||
// ParentResourceUID: string(cn.UID),
|
||||
// Hostname: hostname,
|
||||
// ChildResourceLabels: crl,
|
||||
// Tags: cn.Spec.Tags.Stringify(),
|
||||
// Connector: &connector{
|
||||
// isExitNode: cn.Spec.ExitNode,
|
||||
// },
|
||||
// ProxyClass: proxyClass,
|
||||
// }
|
||||
|
||||
if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
|
||||
sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
|
||||
}
|
||||
// if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
|
||||
// sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
|
||||
// }
|
||||
|
||||
a.mu.Lock()
|
||||
if sts.Connector.isExitNode {
|
||||
a.exitNodes.Add(cn.UID)
|
||||
} else {
|
||||
a.exitNodes.Remove(cn.UID)
|
||||
}
|
||||
if sts.Connector.routes != "" {
|
||||
a.subnetRouters.Add(cn.GetUID())
|
||||
} else {
|
||||
a.subnetRouters.Remove(cn.GetUID())
|
||||
}
|
||||
// a.mu.Lock()
|
||||
// if sts.Connector.isExitNode {
|
||||
// a.exitNodes.Add(cn.UID)
|
||||
// } else {
|
||||
// a.exitNodes.Remove(cn.UID)
|
||||
// }
|
||||
// if sts.Connector.routes != "" {
|
||||
// a.subnetRouters.Add(cn.GetUID())
|
||||
// } else {
|
||||
// a.subnetRouters.Remove(cn.GetUID())
|
||||
// }
|
||||
a.mu.Unlock()
|
||||
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
|
||||
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
|
||||
@ -210,8 +210,8 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
|
||||
connectors.AddSlice(a.subnetRouters.Slice())
|
||||
gaugeConnectorResources.Set(int64(connectors.Len()))
|
||||
|
||||
_, err := a.ssr.Provision(ctx, logger, sts)
|
||||
return err
|
||||
// _, err := a.ssr.Provision(ctx, logger, sts)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) {
|
||||
|
@ -27,6 +27,9 @@ rules:
|
||||
- apiGroups: ["tailscale.com"]
|
||||
resources: ["dnsconfigs", "dnsconfigs/status"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["tailscale.com"]
|
||||
resources: ["clusterconfigs", "clusterconfigs/status"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
@ -56,6 +59,13 @@ rules:
|
||||
- apiGroups: ["discovery.k8s.io"]
|
||||
resources: ["endpointslices"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
- rolebindings
|
||||
verbs:
|
||||
- '*'
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
|
@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
data:
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: servicerecords
|
@ -0,0 +1,66 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.13.0
|
||||
name: clusterconfigs.tailscale.com
|
||||
spec:
|
||||
group: tailscale.com
|
||||
names:
|
||||
kind: ClusterConfig
|
||||
listKind: ClusterConfigList
|
||||
plural: clusterconfigs
|
||||
singular: clusterconfig
|
||||
scope: Cluster
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: 'More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
|
||||
type: object
|
||||
required:
|
||||
- domain
|
||||
properties:
|
||||
domain:
|
||||
description: like 'foo.tailbd97a.ts.net' for services like 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?
|
||||
type: string
|
||||
status:
|
||||
description: ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.
|
||||
type: object
|
||||
required:
|
||||
- proxyNodes
|
||||
properties:
|
||||
proxyNodes:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- magicDNSName
|
||||
- serviceCIDR
|
||||
- tailnetIPs
|
||||
properties:
|
||||
magicDNSName:
|
||||
type: string
|
||||
serviceCIDR:
|
||||
type: string
|
||||
tailnetIPs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
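The CRD above defines spec.domain plus a status listing each proxy's MagicDNS name, service CIDR and tailnet IPs. The matching Go API types are not included in this diff; a hypothetical mirror of the schema, with names and JSON tags guessed from the YAML, would look roughly like:

```go
// Hypothetical Go mirror of the ClusterConfig schema above; the real types
// would live under k8s-operator/apis/v1alpha1 and may differ.
type ClusterConfigSpec struct {
	// Domain like "foo.tailbd97a.ts.net" for Services like
	// "my-svc.foo.tailbd97a.ts.net".
	Domain string `json:"domain"`
}

type ClusterConfigStatus struct {
	ProxyNodes []ProxyNode `json:"proxyNodes"`
}

type ProxyNode struct {
	MagicDNSName string   `json:"magicDNSName"`
	ServiceCIDR  string   `json:"serviceCIDR"`
	TailnetIPs   []string `json:"tailnetIPs"`
}
```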
|
cmd/k8s-operator/deploy/examples/clusterconfig.yaml (new file, 8 lines)
@ -0,0 +1,8 @@
|
||||
# It would be nice if users didn't need to apply this separately, but not sure
|
||||
# how to integrate this into the chart (post-render hook?)
|
||||
apiVersion: tailscale.com/v1alpha1
|
||||
kind: ClusterConfig
|
||||
metadata:
|
||||
name: proxies
|
||||
spec:
|
||||
domain: "foo.bar." # must have the dot at the moment
|
@ -14,6 +14,5 @@ spec:
|
||||
hostname: ts-prod
|
||||
subnetRouter:
|
||||
advertiseRoutes:
|
||||
- "10.40.0.0/14"
|
||||
- "192.168.0.0/14"
|
||||
exitNode: true
|
||||
- "10.0.0.0/8"
|
||||
exitNode: false
|
||||
|
@ -5,5 +5,5 @@ metadata:
|
||||
spec:
|
||||
nameserver:
|
||||
image:
|
||||
repo: tailscale/k8s-nameserver
|
||||
tag: unstable-v1.65
|
||||
repo: gcr.io/csi-test-290908/nameserver
|
||||
tag: v0.0.23proxycidr
|
||||
|
@ -28,6 +28,12 @@ spec:
|
||||
volumeMounts:
|
||||
- name: dnsrecords
|
||||
mountPath: /config
|
||||
env:
|
||||
- name: TS_AUTHKEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: nameserver-key
|
||||
key: ts_auth_key
|
||||
restartPolicy: Always
|
||||
serviceAccount: nameserver
|
||||
serviceAccountName: nameserver
|
||||
|
cmd/k8s-operator/deploy/manifests/nameserver/role.yaml (new file, 11 lines)
@ -0,0 +1,11 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: nameserver
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- '*'
|
@ -0,0 +1,11 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: nameserver
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: nameserver
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nameserver
|
cmd/k8s-operator/deploy/manifests/nameserver/secret.yaml (new file, 4 lines)
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: nameserver-key
|
@ -27,6 +27,12 @@ metadata:
|
||||
name: proxies
|
||||
namespace: tailscale
|
||||
---
|
||||
apiVersion: v1
|
||||
data: null
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: servicerecords
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
@ -1359,6 +1365,16 @@ rules:
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- apiGroups:
|
||||
- tailscale.com
|
||||
resources:
|
||||
- clusterconfigs
|
||||
- clusterconfigs/status
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
@ -1402,6 +1418,13 @@ rules:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
- rolebindings
|
||||
verbs:
|
||||
- '*'
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
|
@ -26,6 +26,10 @@ spec:
|
||||
env:
|
||||
- name: TS_USERSPACE
|
||||
value: "false"
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
|
@ -9,7 +9,6 @@
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@ -248,32 +247,32 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
||||
}
|
||||
|
||||
crl := childResourceLabels(ing.Name, ing.Namespace, "ingress")
|
||||
var tags []string
|
||||
if tstr, ok := ing.Annotations[AnnotationTags]; ok {
|
||||
tags = strings.Split(tstr, ",")
|
||||
}
|
||||
hostname := ing.Namespace + "-" + ing.Name + "-ingress"
|
||||
if tlsHost != "" {
|
||||
hostname, _, _ = strings.Cut(tlsHost, ".")
|
||||
}
|
||||
// var tags []string
|
||||
// if tstr, ok := ing.Annotations[AnnotationTags]; ok {
|
||||
// tags = strings.Split(tstr, ",")
|
||||
// }
|
||||
// hostname := ing.Namespace + "-" + ing.Name + "-ingress"
|
||||
// if tlsHost != "" {
|
||||
// hostname, _, _ = strings.Cut(tlsHost, ".")
|
||||
// }
|
||||
|
||||
sts := &tailscaleSTSConfig{
|
||||
Hostname: hostname,
|
||||
ParentResourceName: ing.Name,
|
||||
ParentResourceUID: string(ing.UID),
|
||||
ServeConfig: sc,
|
||||
Tags: tags,
|
||||
ChildResourceLabels: crl,
|
||||
ProxyClass: proxyClass,
|
||||
}
|
||||
// sts := &tailscaleSTSConfig{
|
||||
// Hostname: hostname,
|
||||
// ParentResourceName: ing.Name,
|
||||
// ParentResourceUID: string(ing.UID),
|
||||
// ServeConfig: sc,
|
||||
// Tags: tags,
|
||||
// ChildResourceLabels: crl,
|
||||
// ProxyClass: proxyClass,
|
||||
// }
|
||||
|
||||
if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
|
||||
sts.ForwardClusterTrafficViaL7IngressProxy = true
|
||||
}
|
||||
// if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
|
||||
// sts.ForwardClusterTrafficViaL7IngressProxy = true
|
||||
// }
|
||||
|
||||
if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
|
||||
return fmt.Errorf("failed to provision: %w", err)
|
||||
}
|
||||
// if _, err := a.ssr.Provision(ctx, logger, sts); err != nil {
|
||||
// return fmt.Errorf("failed to provision: %w", err)
|
||||
// }
|
||||
|
||||
_, tsHost, _, err := a.ssr.DeviceInfo(ctx, crl)
|
||||
if err != nil {
|
||||
|
@ -8,6 +8,7 @@
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
@ -18,6 +19,7 @@
|
||||
xslices "golang.org/x/exp/slices"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -26,10 +28,12 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/yaml"
|
||||
"tailscale.com/client/tailscale"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
@ -53,10 +57,12 @@
|
||||
// response to users applying DNSConfig.
|
||||
type NameserverReconciler struct {
|
||||
client.Client
|
||||
tsClient tsClient
|
||||
logger *zap.SugaredLogger
|
||||
recorder record.EventRecorder
|
||||
clock tstime.Clock
|
||||
tsNamespace string
|
||||
defaultTags []string
|
||||
|
||||
mu sync.Mutex // protects following
|
||||
managedNameservers set.Slice[types.UID] // one or none
|
||||
@ -171,6 +177,8 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
|
||||
labels: labels,
|
||||
imageRepo: defaultNameserverImageRepo,
|
||||
imageTag: defaultNameserverImageTag,
|
||||
tsClient: a.tsClient,
|
||||
tags: a.defaultTags,
|
||||
}
|
||||
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" {
|
||||
dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo
|
||||
@ -178,7 +186,7 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
|
||||
if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" {
|
||||
dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag
|
||||
}
|
||||
for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} {
|
||||
for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable, secretDeployable, roleDeployable, roleBindingDeployable} {
|
||||
if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil {
|
||||
return fmt.Errorf("error reconciling %s: %w", deployable.kind, err)
|
||||
}
|
||||
@ -197,6 +205,23 @@ func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.D
|
||||
return nil
|
||||
}
|
||||
|
||||
func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) {
|
||||
caps := tailscale.KeyCapabilities{
|
||||
Devices: tailscale.KeyDeviceCapabilities{
|
||||
Create: tailscale.KeyDeviceCreateCapabilities{
|
||||
Reusable: false,
|
||||
Preauthorized: true,
|
||||
Tags: tags,
|
||||
},
|
||||
},
|
||||
}
|
||||
key, _, err := tsClient.CreateKey(ctx, caps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
type deployable struct {
kind string
updateObj func(context.Context, *deployConfig, client.Client) error
@ -208,6 +233,8 @@ type deployConfig struct {
labels map[string]string
ownerRefs []metav1.OwnerReference
namespace string
tsClient tsClient
tags []string
}

var (
@ -219,6 +246,12 @@ type deployConfig struct {
saYaml []byte
//go:embed deploy/manifests/nameserver/svc.yaml
svcYaml []byte
//go:embed deploy/manifests/nameserver/secret.yaml
secretYaml []byte
//go:embed deploy/manifests/nameserver/role.yaml
roleYaml []byte
//go:embed deploy/manifests/nameserver/rolebinding.yaml
rolebindingYaml []byte

deployDeployable = deployable{
kind: "Deployment",
@ -234,7 +267,33 @@ type deployConfig struct {
updateF := func(oldD *appsv1.Deployment) {
oldD.Spec = d.Spec
}
_, err := createOrUpdate[appsv1.Deployment](ctx, kubeClient, cfg.namespace, d, updateF)
// Get all proxy ConfigMaps and mount them
cmList := &corev1.ConfigMapList{}
sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"component": "proxies"}})
if err != nil {
return fmt.Errorf("error creating label selector: %w", err)
}
if err := kubeClient.List(ctx, cmList, &client.ListOptions{LabelSelector: sel}); err != nil {
return fmt.Errorf("error listing ConfigMaps: %w", err)
}
for _, cm := range cmList.Items {
volume := corev1.Volume{
Name: cm.Name,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: cm.Name},
},
},
}
volumeMount := corev1.VolumeMount{
Name: cm.Name,
MountPath: fmt.Sprintf("/services/%s", cm.Name),
ReadOnly: true,
}
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume)
d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
}
_, err = createOrUpdate[appsv1.Deployment](ctx, kubeClient, cfg.namespace, d, updateF)
return err
},
}
@ -266,6 +325,37 @@ type deployConfig struct {
return err
},
}
secretDeployable = deployable{
kind: "Secret",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
secret := new(corev1.Secret)
if err := yaml.Unmarshal(secretYaml, &secret); err != nil {
return fmt.Errorf("error unmarshalling Secret yaml: %w", err)
}
// TODO: make the nameserver tsnet Server actually store state in kube secret
secret.ObjectMeta.Labels = cfg.labels
secret.ObjectMeta.OwnerReferences = cfg.ownerRefs
secret.ObjectMeta.Namespace = cfg.namespace
// Get the secret
oldS := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "nameserver-key", Namespace: cfg.namespace},
}
if err := kubeClient.Get(ctx, client.ObjectKeyFromObject(oldS), oldS); apierrors.IsNotFound(err) {
key, err := newAuthKey(ctx, cfg.tsClient, cfg.tags)
if err != nil {
return fmt.Errorf("error creating new auth key: %w", err)
}
// write it to the Secret
mak.Set(&secret.StringData, "ts_auth_key", key)
return kubeClient.Create(ctx, secret)
} else if err != nil {
return fmt.Errorf("error looking up 'nameserver-key' Secret: %w", err)
} else {
log.Printf("'nameserver-key' Secret exists, do nothing")
return nil
}
},
}
cmDeployable = deployable{
kind: "ConfigMap",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
@ -280,4 +370,32 @@ type deployConfig struct {
return err
},
}
roleDeployable = deployable{
kind: "Role",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
role := new(rbacv1.Role)
if err := yaml.Unmarshal(roleYaml, &role); err != nil {
return fmt.Errorf("error unmarshalling Role yaml: %w", err)
}
role.ObjectMeta.Labels = cfg.labels
role.ObjectMeta.OwnerReferences = cfg.ownerRefs
role.ObjectMeta.Namespace = cfg.namespace
_, err := createOrUpdate[rbacv1.Role](ctx, kubeClient, cfg.namespace, role, func(*rbacv1.Role) {})
return err
},
}
roleBindingDeployable = deployable{
kind: "RoleBinding",
updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error {
rb := new(rbacv1.RoleBinding)
if err := yaml.Unmarshal(rolebindingYaml, &rb); err != nil {
return fmt.Errorf("error unmarshalling RoleBinding yaml: %w", err)
}
rb.ObjectMeta.Labels = cfg.labels
rb.ObjectMeta.OwnerReferences = cfg.ownerRefs
rb.ObjectMeta.Namespace = cfg.namespace
_, err := createOrUpdate[rbacv1.RoleBinding](ctx, kubeClient, cfg.namespace, rb, func(*rbacv1.RoleBinding) {})
return err
},
}
)
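
The Deployment deployable above mounts every ConfigMap labelled component: proxies into the nameserver Pod at /services/<configmap-name>. A minimal sketch of how the nameserver could walk those mounts and build FQDN-to-IP records from the proxyConfig key written elsewhere in this diff; the helper and the trimmed-down types are illustrative, not part of this change:

package main

import (
	"encoding/json"
	"net/netip"
	"os"
	"path/filepath"
)

// sketchService and sketchProxyConfig mirror only the fields of
// kubeutils.Service and kubeutils.ProxyConfig that this sketch needs;
// the real types may differ.
type sketchService struct {
	FQDN         string
	V4ServiceIPs []netip.Addr
}

type sketchProxyConfig struct {
	Services map[string]sketchService
}

// dnsRecordsFromMounts walks /services/<configmap-name>/proxyConfig for every
// mounted proxy ConfigMap and collects FQDN -> service IP records that the
// nameserver could answer from.
func dnsRecordsFromMounts(root string) (map[string][]netip.Addr, error) {
	records := make(map[string][]netip.Addr)
	entries, err := os.ReadDir(root)
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		b, err := os.ReadFile(filepath.Join(root, e.Name(), "proxyConfig"))
		if err != nil {
			return nil, err
		}
		var pc sketchProxyConfig
		if err := json.Unmarshal(b, &pc); err != nil {
			return nil, err
		}
		for _, svc := range pc.Services {
			records[svc.FQDN] = append(records[svc.FQDN], svc.V4ServiceIPs...)
		}
	}
	return records, nil
}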
|
||||
|
@ -22,6 +22,7 @@
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
discoveryv1 "k8s.io/api/discovery/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
@ -238,6 +239,8 @@ func runReconcilers(opts reconcilerOpts) {
|
||||
&appsv1.StatefulSet{}: nsFilter,
|
||||
&appsv1.Deployment{}: nsFilter,
|
||||
&discoveryv1.EndpointSlice{}: nsFilter,
|
||||
&rbacv1.Role{}: nsFilter,
|
||||
&rbacv1.RoleBinding{}: nsFilter,
|
||||
},
|
||||
},
|
||||
Scheme: tsapi.GlobalScheme,
|
||||
@ -282,51 +285,66 @@ func runReconcilers(opts reconcilerOpts) {
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create service reconciler: %v", err)
|
||||
}
|
||||
ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress"))
|
||||
// If a ProxyClassChanges, enqueue all Ingresses labeled with that
|
||||
// ProxyClass's name.
|
||||
proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog))
|
||||
// Enqueue Ingress if a managed Service or backend Service associated with a tailscale Ingress changes.
|
||||
svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog))
|
||||
err = builder.
|
||||
ControllerManagedBy(mgr).
|
||||
For(&networkingv1.Ingress{}).
|
||||
Watches(&appsv1.StatefulSet{}, ingressChildFilter).
|
||||
Watches(&corev1.Secret{}, ingressChildFilter).
|
||||
Watches(&corev1.Service{}, svcHandlerForIngress).
|
||||
Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress).
|
||||
Complete(&IngressReconciler{
|
||||
ssr: ssr,
|
||||
recorder: eventRecorder,
|
||||
Client: mgr.GetClient(),
|
||||
logger: opts.log.Named("ingress-reconciler"),
|
||||
Named("proxies-reconciler").
|
||||
For(&tsapi.ClusterConfig{}).
|
||||
Complete(&proxiesReconciler{
|
||||
ssr: ssr,
|
||||
Client: mgr.GetClient(),
|
||||
logger: opts.log.Named("proxies-reconciler"),
|
||||
recorder: eventRecorder,
|
||||
tsNamespace: opts.tailscaleNamespace,
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create ingress reconciler: %v", err)
|
||||
startlog.Fatalf("could not create proxies reconciler: %v", err)
|
||||
}
|
||||
// ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress"))
|
||||
// // If a ProxyClassChanges, enqueue all Ingresses labeled with that
|
||||
// // ProxyClass's name.
|
||||
// proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog))
|
||||
// // Enque Ingress if a managed Service or backend Service associated with a tailscale Ingress changes.
|
||||
// svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog))
|
||||
// err = builder.
|
||||
// ControllerManagedBy(mgr).
|
||||
// For(&networkingv1.Ingress{}).
|
||||
// Watches(&appsv1.StatefulSet{}, ingressChildFilter).
|
||||
// Watches(&corev1.Secret{}, ingressChildFilter).
|
||||
// Watches(&corev1.Service{}, svcHandlerForIngress).
|
||||
// Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress).
|
||||
// Complete(&IngressReconciler{
|
||||
// ssr: ssr,
|
||||
// recorder: eventRecorder,
|
||||
// Client: mgr.GetClient(),
|
||||
// logger: opts.log.Named("ingress-reconciler"),
|
||||
// })
|
||||
// if err != nil {
|
||||
// startlog.Fatalf("could not create ingress reconciler: %v", err)
|
||||
// }
|
||||
|
||||
connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
|
||||
// If a ProxyClassChanges, enqueue all Connectors that have
|
||||
// .spec.proxyClass set to the name of this ProxyClass.
|
||||
proxyClassFilterForConnector := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForConnector(mgr.GetClient(), startlog))
|
||||
err = builder.ControllerManagedBy(mgr).
|
||||
For(&tsapi.Connector{}).
|
||||
Watches(&appsv1.StatefulSet{}, connectorFilter).
|
||||
Watches(&corev1.Secret{}, connectorFilter).
|
||||
Watches(&tsapi.ProxyClass{}, proxyClassFilterForConnector).
|
||||
Complete(&ConnectorReconciler{
|
||||
ssr: ssr,
|
||||
recorder: eventRecorder,
|
||||
Client: mgr.GetClient(),
|
||||
logger: opts.log.Named("connector-reconciler"),
|
||||
clock: tstime.DefaultClock{},
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create connector reconciler: %v", err)
|
||||
}
|
||||
// connectorFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("connector"))
|
||||
// // If a ProxyClassChanges, enqueue all Connectors that have
|
||||
// // .spec.proxyClass set to the name of this ProxyClass.
|
||||
// proxyClassFilterForConnector := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForConnector(mgr.GetClient(), startlog))
|
||||
// err = builder.ControllerManagedBy(mgr).
|
||||
// For(&tsapi.Connector{}).
|
||||
// Watches(&appsv1.StatefulSet{}, connectorFilter).
|
||||
// Watches(&corev1.Secret{}, connectorFilter).
|
||||
// Watches(&tsapi.ProxyClass{}, proxyClassFilterForConnector).
|
||||
// Complete(&ConnectorReconciler{
|
||||
// ssr: ssr,
|
||||
// recorder: eventRecorder,
|
||||
// Client: mgr.GetClient(),
|
||||
// logger: opts.log.Named("connector-reconciler"),
|
||||
// clock: tstime.DefaultClock{},
|
||||
// })
|
||||
// if err != nil {
|
||||
// startlog.Fatalf("could not create connector reconciler: %v", err)
|
||||
// }
|
||||
// TODO (irbekrm): switch to metadata-only watches for resources whose
|
||||
// spec we don't need to inspect to reduce memory consumption.
|
||||
// https://github.com/kubernetes-sigs/controller-runtime/issues/1159
|
||||
// TODO: watch for proxy config ConfigMap change events
|
||||
nameserverFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("nameserver"))
|
||||
err = builder.ControllerManagedBy(mgr).
|
||||
For(&tsapi.DNSConfig{}).
|
||||
@ -337,52 +355,54 @@ func runReconcilers(opts reconcilerOpts) {
|
||||
Complete(&NameserverReconciler{
|
||||
recorder: eventRecorder,
|
||||
tsNamespace: opts.tailscaleNamespace,
|
||||
tsClient: opts.tsClient,
|
||||
Client: mgr.GetClient(),
|
||||
logger: opts.log.Named("nameserver-reconciler"),
|
||||
clock: tstime.DefaultClock{},
|
||||
defaultTags: strings.Split(opts.proxyTags, ","),
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create nameserver reconciler: %v", err)
|
||||
}
|
||||
err = builder.ControllerManagedBy(mgr).
|
||||
For(&tsapi.ProxyClass{}).
|
||||
Complete(&ProxyClassReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
recorder: eventRecorder,
|
||||
logger: opts.log.Named("proxyclass-reconciler"),
|
||||
clock: tstime.DefaultClock{},
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatal("could not create proxyclass reconciler: %v", err)
|
||||
}
|
||||
logger := startlog.Named("dns-records-reconciler-event-handlers")
|
||||
// On EndpointSlice events, if it is an EndpointSlice for an
|
||||
// ingress/egress proxy headless Service, reconcile the headless
|
||||
// Service.
|
||||
dnsRREpsOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerEndpointSliceHandler)
|
||||
// On DNSConfig changes, reconcile all headless Services for
|
||||
// ingress/egress proxies in operator namespace.
|
||||
dnsRRDNSConfigOpts := handler.EnqueueRequestsFromMapFunc(enqueueAllIngressEgressProxySvcsInNS(opts.tailscaleNamespace, mgr.GetClient(), logger))
|
||||
// On Service events, if it is an ingress/egress proxy headless Service, reconcile it.
|
||||
dnsRRServiceOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerServiceHandler)
|
||||
// On Ingress events, if it is a tailscale Ingress or if tailscale is the default ingress controller, reconcile the proxy
|
||||
// headless Service.
|
||||
dnsRRIngressOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerIngressHandler(opts.tailscaleNamespace, opts.proxyActAsDefaultLoadBalancer, mgr.GetClient(), logger))
|
||||
err = builder.ControllerManagedBy(mgr).
|
||||
Named("dns-records-reconciler").
|
||||
Watches(&corev1.Service{}, dnsRRServiceOpts).
|
||||
Watches(&networkingv1.Ingress{}, dnsRRIngressOpts).
|
||||
Watches(&discoveryv1.EndpointSlice{}, dnsRREpsOpts).
|
||||
Watches(&tsapi.DNSConfig{}, dnsRRDNSConfigOpts).
|
||||
Complete(&dnsRecordsReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
tsNamespace: opts.tailscaleNamespace,
|
||||
logger: opts.log.Named("dns-records-reconciler"),
|
||||
isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create DNS records reconciler: %v", err)
|
||||
}
|
||||
// err = builder.ControllerManagedBy(mgr).
|
||||
// For(&tsapi.ProxyClass{}).
|
||||
// Complete(&ProxyClassReconciler{
|
||||
// Client: mgr.GetClient(),
|
||||
// recorder: eventRecorder,
|
||||
// logger: opts.log.Named("proxyclass-reconciler"),
|
||||
// clock: tstime.DefaultClock{},
|
||||
// })
|
||||
// if err != nil {
|
||||
// startlog.Fatal("could not create proxyclass reconciler: %v", err)
|
||||
// }
|
||||
// logger := startlog.Named("dns-records-reconciler-event-handlers")
|
||||
// // On EndpointSlice events, if it is an EndpointSlice for an
|
||||
// // ingress/egress proxy headless Service, reconcile the headless
|
||||
// // Service.
|
||||
// dnsRREpsOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerEndpointSliceHandler)
|
||||
// // On DNSConfig changes, reconcile all headless Services for
|
||||
// // ingress/egress proxies in operator namespace.
|
||||
// dnsRRDNSConfigOpts := handler.EnqueueRequestsFromMapFunc(enqueueAllIngressEgressProxySvcsInNS(opts.tailscaleNamespace, mgr.GetClient(), logger))
|
||||
// // On Service events, if it is an ingress/egress proxy headless Service, reconcile it.
|
||||
// dnsRRServiceOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerServiceHandler)
|
||||
// // On Ingress events, if it is a tailscale Ingress or if tailscale is the default ingress controller, reconcile the proxy
|
||||
// // headless Service.
|
||||
// dnsRRIngressOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerIngressHandler(opts.tailscaleNamespace, opts.proxyActAsDefaultLoadBalancer, mgr.GetClient(), logger))
|
||||
// err = builder.ControllerManagedBy(mgr).
|
||||
// Named("dns-records-reconciler").
|
||||
// Watches(&corev1.Service{}, dnsRRServiceOpts).
|
||||
// Watches(&networkingv1.Ingress{}, dnsRRIngressOpts).
|
||||
// Watches(&discoveryv1.EndpointSlice{}, dnsRREpsOpts).
|
||||
// Watches(&tsapi.DNSConfig{}, dnsRRDNSConfigOpts).
|
||||
// Complete(&dnsRecordsReconciler{
|
||||
// Client: mgr.GetClient(),
|
||||
// tsNamespace: opts.tailscaleNamespace,
|
||||
// logger: opts.log.Named("dns-records-reconciler"),
|
||||
// isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
|
||||
// })
|
||||
// if err != nil {
|
||||
// startlog.Fatalf("could not create DNS records reconciler: %v", err)
|
||||
// }
|
||||
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
|
||||
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
||||
startlog.Fatalf("could not start manager: %v", err)
|
||||
|
94
cmd/k8s-operator/proxies.go
Normal file
94
cmd/k8s-operator/proxies.go
Normal file
@ -0,0 +1,94 @@
|
||||
package main

import (
"context"
"fmt"

"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

type proxiesReconciler struct {
client.Client
logger *zap.SugaredLogger

recorder record.EventRecorder
ssr *tailscaleSTSReconciler

tsNamespace string
}

func (pr *proxiesReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
logger := pr.logger.With("ClusterConfig", req.Name)
logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished")

cc := new(tsapi.ClusterConfig)
err = pr.Get(ctx, req.NamespacedName, cc)
if apierrors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
logger.Debugf("ClusterConfig not found, assuming it was deleted")
return reconcile.Result{}, nil
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get ClusterConfig: %w", err)
}
ownerRef := metav1.NewControllerRef(cc, tsapi.SchemeGroupVersion.WithKind("ClusterConfig"))

// For this prototype the number of proxy nodes is hardcoded to 4,
// Service CIDR range hardcoded to 100.64.2.0/24
// https://www.davidc.net/sites/default/subnets/subnets.html
cidrs := []string{"100.64.2.0/26", "100.64.2.64/26", "100.64.2.128/26", "100.64.2.192/26"}
stsCfg := &tailscaleSTSConfig{
name: cc.Name,
serviceCIDRs: cidrs,
clusterConfOwnerRef: ownerRef,
}
if err = pr.ssr.Provision(ctx, logger, stsCfg); err != nil {
return reconcile.Result{}, fmt.Errorf("error provisioning proxy: %w", err)
}
// logger.Debugf("finished reconciling index %d ", i)
// Now watch for Secret changes, pull out device info and update cluster config status
return reconcile.Result{}, nil

// // build opts
// stsCfg := &tailscaleSTSConfig{
// Tags: []string{"tag:k8s"},
// HostnameTemplate: class.Name,
// serviceClass: class.Name,
// dnsAddr: cidr.Addr(),
// serviceCIDR: []netip.Prefix{cidr},
// numProxies: class.NumProxies,
// }
// defaultClassCIDR = []netip.Prefix{cidr}

// // write DNS addr to the ServiceRecords ConfigMap
// cm := &corev1.ConfigMap{}
// if err := pr.Get(ctx, types.NamespacedName{Namespace: pr.tsNamespace, Name: "servicerecords"}, cm); err != nil {
// return reconcile.Result{}, fmt.Errorf("error getting serviceRecords ConfigMap: %w", err)
// }

// var serviceRecords *kube.Records
// if serviceRecordsB := cm.BinaryData["serviceRecords"]; len(serviceRecordsB) == 0 {
// serviceRecords = &kube.Records{Version: kube.Alpha1Version}
// } else {
// if err := json.Unmarshal(cm.BinaryData["serviceRecords"], serviceRecords); err != nil {
// return reconcile.Result{}, fmt.Errorf("error unmarshalling service records: %w", err)
// }
// }
// // Remove, this will only get passed as env var to the proxies
// if dnsAddr := serviceRecords.DNSAddr; dnsAddr != "" {
// logger.Info("DNS addr already set to %s", dnsAddr)
// return reconcile.Result{}, nil
// }
// dnsAddr := defaultClassCIDR[0].Addr()
// serviceRecords.DNSAddr = dnsAddr.String()
// serviceRecordsB, err := json.Marshal(serviceRecords)
// cm.BinaryData["serviceRecords"] = serviceRecordsB

// return reconcile.Result{}, pr.Update(ctx, cm)
}
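
The Reconcile above hardcodes four /26 slices of the prototype's 100.64.2.0/24 Service range. A minimal sketch of deriving the per-proxy CIDRs from the parent range instead; splitCIDR is a hypothetical helper, not part of this change:

package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// splitCIDR splits an IPv4 parent prefix into n equally sized sub-prefixes.
// n must be a power of two and the parent must be wide enough to hold them.
func splitCIDR(parent netip.Prefix, n int) ([]netip.Prefix, error) {
	extra := 0
	for 1<<extra < n {
		extra++
	}
	if n <= 0 || 1<<extra != n {
		return nil, fmt.Errorf("n must be a power of two, got %d", n)
	}
	newBits := parent.Bits() + extra
	if !parent.Addr().Is4() || newBits > 32 {
		return nil, fmt.Errorf("cannot split %v into %d subnets", parent, n)
	}
	a4 := parent.Masked().Addr().As4()
	base := binary.BigEndian.Uint32(a4[:])
	step := uint32(1) << (32 - newBits)
	out := make([]netip.Prefix, 0, n)
	for i := 0; i < n; i++ {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], base+uint32(i)*step)
		out = append(out, netip.PrefixFrom(netip.AddrFrom4(b), newBits))
	}
	return out, nil
}

func main() {
	subnets, err := splitCIDR(netip.MustParsePrefix("100.64.2.0/24"), 4)
	if err != nil {
		panic(err)
	}
	// Prints the same four /26 prefixes the reconciler hardcodes.
	fmt.Println(subnets)
}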

52
cmd/k8s-operator/proxynodes.go
Normal file
@ -0,0 +1,52 @@
package main

import (
"log"
"net/netip"
"os"
"strconv"
)

// TODO: probably remove this file
func proxycidr() {
clusterDomain := os.Getenv("TS_CLUSTER_DOMAIN")
if clusterDomain == "" {
log.Fatal("TS_CLUSTER_DOMAIN must be set")
}
// TODO: check if domain already exists for a different CIDR; if so make <cluster-domain>-<n>

// Allocate /24 and set /1 to resolve DNS for this subdomain?
serviceCIDR := os.Getenv("TS_SERVICE_CIDR")
if serviceCIDR == "" {
log.Fatal("TS_SERVICE_CIDR must be set")
}
clusterSize := os.Getenv("TS_CLUSTER_SIZE")
if clusterSize == "" {
log.Fatal("TS_CLUSTER_SIZE must be set")
}

// create clusterSize proxies, each advertises /24
nProxies, err := strconv.Atoi(clusterSize)
if err != nil {
log.Fatalf("%s cannot be converted to int: %v", clusterSize, err)
}
for range nProxies - 1 {
}
}

func ensureProxyExists(n int) {
const (
labelserviceClass = "tailscale.com/service-class"
labelProxyID = "tailscale.com/proxy-id"
)

}

type service struct {
ip netip.Addr
domainName string
}

type dnsConfig struct {
dnsNamesToIPs map[string][]netip.Addr
}
|
@ -13,6 +13,7 @@
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
@ -25,6 +26,7 @@
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/yaml"
|
||||
"tailscale.com/client/tailscale"
|
||||
@ -32,7 +34,6 @@
|
||||
kubeutils "tailscale.com/k8s-operator"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/ptr"
|
||||
@ -103,29 +104,38 @@
|
||||
)
|
||||
|
||||
type tailscaleSTSConfig struct {
|
||||
ParentResourceName string
|
||||
ParentResourceUID string
|
||||
ChildResourceLabels map[string]string
|
||||
// serviceClass string
|
||||
// dnsAddr netip.Addr
|
||||
// numProxies int
|
||||
name string
|
||||
serviceCIDRs []string
|
||||
clusterConfOwnerRef *metav1.OwnerReference
|
||||
|
||||
ServeConfig *ipn.ServeConfig // if serve config is set, this is a proxy for Ingress
|
||||
ClusterTargetIP string // ingress target IP
|
||||
ClusterTargetDNSName string // ingress target DNS name
|
||||
// If set to true, operator should configure containerboot to forward
|
||||
// cluster traffic via the proxy set up for Kubernetes Ingress.
|
||||
ForwardClusterTrafficViaL7IngressProxy bool
|
||||
Tags []string // if empty, use defaultTags
|
||||
ProxyClass string
|
||||
|
||||
TailnetTargetIP string // egress target IP
|
||||
// ChildResourceLabels map[string]string
|
||||
|
||||
TailnetTargetFQDN string // egress target FQDN
|
||||
// ServeConfig *ipn.ServeConfig // if serve config is set, this is a proxy for Ingress
|
||||
// ClusterTargetIP string // ingress target IP
|
||||
// ClusterTargetDNSName string // ingress target DNS name
|
||||
// // If set to true, operator should configure containerboot to forward
|
||||
// // cluster traffic via the proxy set up for Kubernetes Ingress.
|
||||
// ForwardClusterTrafficViaL7IngressProxy bool
|
||||
|
||||
Hostname string
|
||||
Tags []string // if empty, use defaultTags
|
||||
// TailnetTargetIP string // egress target IP
|
||||
|
||||
// TailnetTargetFQDN string // egress target FQDN
|
||||
|
||||
// Hostname will be <hostnameTemplate>-<n> where 'n' is the numeric id
|
||||
// of the proxy
|
||||
// HostnameTemplate string
|
||||
|
||||
// Connector specifies a configuration of a Connector instance if that's
|
||||
// what this StatefulSet should be created for.
|
||||
Connector *connector
|
||||
|
||||
ProxyClass string
|
||||
// Connector *connector
|
||||
// ParentResourceName string
|
||||
// ParentResourceUID string
|
||||
}
|
||||
|
||||
type connector struct {
|
||||
@ -161,26 +171,45 @@ func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool {
|
||||
return len(a.tsnetServer.CertDomains()) > 0
|
||||
}
|
||||
|
||||
// Provision ensures that the StatefulSet for the given service is running and
|
||||
// up to date.
|
||||
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) {
|
||||
// Provision provisions a StatefulSet with n replicas for each proxy class.
|
||||
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) error {
|
||||
// Do full reconcile.
|
||||
// TODO (don't create Service for the Connector)
|
||||
hsvc, err := a.reconcileHeadlessService(ctx, logger, sts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
|
||||
// for i := 0; i < sts.numProxies; i++ {
|
||||
|
||||
// }
|
||||
|
||||
// TODO: the headless Service is needed for cluster workloads to be able
|
||||
// to reach the egress proxies. Move the creation of this out of this
|
||||
// code altogether and create one for each exposed tailnet service in
|
||||
// services-reconciler.
|
||||
// hsvc, err := a.reconcileHeadlessService(ctx, logger, sts, sts.hostnameBase)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("failed to reconcile headless service: %w", err)
|
||||
// }
|
||||
|
||||
// Create Secret for each proxy replica
|
||||
for i, cidrS := range sts.serviceCIDRs {
|
||||
cidr, err := netip.ParsePrefix(cidrS)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing %s: %w", cidrS, err)
|
||||
}
|
||||
_, _, _, err = a.createOrGetSecret(ctx, logger, sts, i, cidr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create or get API key secret: %w", err)
|
||||
}
|
||||
_, err = a.createOrGetCM(ctx, logger, sts, i, cidr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create or get services ConfigMap: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
|
||||
// TODO: fix tsConfigHash
|
||||
_, err := a.reconcileSTS(ctx, logger, sts, "fakeconfighash")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
|
||||
return fmt.Errorf("failed to reconcile statefulset: %w", err)
|
||||
}
|
||||
_, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
|
||||
}
|
||||
|
||||
return hsvc, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup removes all resources associated that were created by Provision with
|
||||
@ -269,34 +298,63 @@ func statefulSetNameBase(parent string) string {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) {
|
||||
nameBase := statefulSetNameBase(sts.ParentResourceName)
|
||||
func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, hostname string) (*corev1.Service, error) {
|
||||
logger.Debugf("reconciling headless svc", "hostname", hostname)
|
||||
nameBase := statefulSetNameBase(hostname)
|
||||
hsvc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: nameBase,
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: sts.ChildResourceLabels,
|
||||
GenerateName: nameBase,
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: map[string]string{"app": hostname},
|
||||
OwnerReferences: []metav1.OwnerReference{*sts.clusterConfOwnerRef},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "None",
|
||||
Selector: map[string]string{
|
||||
"app": sts.ParentResourceUID,
|
||||
"app": hostname,
|
||||
},
|
||||
},
|
||||
}
|
||||
logger.Debugf("reconciling headless service for StatefulSet")
|
||||
logger.Debugf("reconciling headless service for StatefulSet", "namebase", nameBase)
|
||||
return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec })
|
||||
}
|
||||
func (a *tailscaleSTSReconciler) createOrGetCM(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, i int, cidr netip.Prefix) (string, error) {
|
||||
hostname := fmt.Sprintf("%s-%d", stsC.name, i)
|
||||
cm := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: hostname,
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: map[string]string{"proxy-index": hostname, "proxies": stsC.name, "component": "proxies"},
|
||||
OwnerReferences: []metav1.OwnerReference{*stsC.clusterConfOwnerRef},
|
||||
},
|
||||
}
|
||||
if err := a.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) {
|
||||
proxyConfig := &kubeutils.ProxyConfig{
|
||||
ServicesCIDRRange: cidr,
|
||||
}
|
||||
proxyConfigBytes, err := json.Marshal(proxyConfig)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshalling config for proxy %s: %w", hostname, err)
|
||||
}
|
||||
mak.Set(&cm.BinaryData, "proxyConfig", proxyConfigBytes)
|
||||
logger.Infof("creating services ConfigMap %s", hostname)
|
||||
return hostname, a.Create(ctx, cm)
|
||||
} else if err != nil {
|
||||
return "", fmt.Errorf("error getting ConfigMap %s", hostname)
|
||||
}
|
||||
// For this prototype, the Services CIDR written to the ConfigMap is
|
||||
// never updated.
|
||||
return hostname, nil
|
||||
}
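
createOrGetCM stores the per-proxy kubeutils.ProxyConfig as JSON under the proxyConfig key of a ConfigMap that the StatefulSet below mounts into each proxy Pod, with TS_EXPERIMENTAL_SERVICES_CONFIG_PATH pointing at the mount. A minimal sketch, under those assumptions, of how the proxy side could load it; loadProxyConfig and the trimmed-down sketchProxyConfig type are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
	"os"
	"path/filepath"
)

// sketchProxyConfig mirrors just the piece of kubeutils.ProxyConfig this
// sketch needs; the real type lives in tailscale.com/k8s-operator.
type sketchProxyConfig struct {
	ServicesCIDRRange netip.Prefix
}

// loadProxyConfig reads the JSON written by createOrGetCM from the directory
// that TS_EXPERIMENTAL_SERVICES_CONFIG_PATH points at.
func loadProxyConfig() (*sketchProxyConfig, error) {
	dir := os.Getenv("TS_EXPERIMENTAL_SERVICES_CONFIG_PATH")
	if dir == "" {
		return nil, fmt.Errorf("TS_EXPERIMENTAL_SERVICES_CONFIG_PATH is not set")
	}
	b, err := os.ReadFile(filepath.Join(dir, "proxyConfig"))
	if err != nil {
		return nil, err
	}
	var pc sketchProxyConfig
	if err := json.Unmarshal(b, &pc); err != nil {
		return nil, fmt.Errorf("decoding proxyConfig: %w", err)
	}
	return &pc, nil
}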
|
||||
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) {
|
||||
func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, i int, serviceCIDR netip.Prefix) (secretName, hash string, configs tailscaleConfigs, _ error) {
|
||||
hostname := fmt.Sprintf("%s-%d", stsC.name, i)
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// Hardcode a -0 suffix so that in future, if we support
|
||||
// multiple StatefulSet replicas, we can provision -N for
|
||||
// those.
|
||||
Name: hsvc.Name + "-0",
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: stsC.ChildResourceLabels,
|
||||
Name: hostname,
|
||||
Namespace: a.operatorNamespace,
|
||||
Labels: map[string]string{"app": hostname},
|
||||
OwnerReferences: []metav1.OwnerReference{*stsC.clusterConfOwnerRef},
|
||||
},
|
||||
}
|
||||
var orig *corev1.Secret // unmodified copy of secret
|
||||
@ -312,7 +370,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
|
||||
// Initially it contains only tailscaled config, but when the
|
||||
// proxy starts, it will also store there the state, certs and
|
||||
// ACME account key.
|
||||
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels)
|
||||
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, map[string]string{"app": hostname})
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
@ -334,7 +392,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
|
||||
return "", "", nil, err
|
||||
}
|
||||
}
|
||||
configs, err := tailscaledConfig(stsC, authKey, orig)
|
||||
configs, err := tailscaledConfig(stsC, authKey, orig, hostname, serviceCIDR)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err)
|
||||
}
|
||||
@ -358,14 +416,6 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
|
||||
}
|
||||
}
|
||||
|
||||
if stsC.ServeConfig != nil {
|
||||
j, err := json.Marshal(stsC.ServeConfig)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
mak.Set(&secret.StringData, "serve-config", string(j))
|
||||
}
|
||||
|
||||
if orig != nil {
|
||||
logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig))
|
||||
if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil {
|
||||
@ -445,22 +495,22 @@ func (a *tailscaleSTSReconciler) newAuthKey(ctx context.Context, tags []string)
|
||||
//go:embed deploy/manifests/userspace-proxy.yaml
|
||||
var userspaceProxyYaml []byte
|
||||
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) {
|
||||
func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, tsConfigHash string) (*appsv1.StatefulSet, error) {
|
||||
ss := new(appsv1.StatefulSet)
|
||||
if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
|
||||
if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal userspace proxy spec: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
|
||||
}
|
||||
for i := range ss.Spec.Template.Spec.InitContainers {
|
||||
c := &ss.Spec.Template.Spec.InitContainers[i]
|
||||
if c.Name == "sysctler" {
|
||||
c.Image = a.proxyImage
|
||||
break
|
||||
}
|
||||
// if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
|
||||
// if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
|
||||
// return nil, fmt.Errorf("failed to unmarshal userspace proxy spec: %v", err)
|
||||
// }
|
||||
// } else {
|
||||
|
||||
if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
|
||||
}
|
||||
for i := range ss.Spec.Template.Spec.InitContainers {
|
||||
c := &ss.Spec.Template.Spec.InitContainers[i]
|
||||
if c.Name == "sysctler" {
|
||||
c.Image = a.proxyImage
|
||||
break
|
||||
}
|
||||
}
|
||||
pod := &ss.Spec.Template
|
||||
@ -477,64 +527,82 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
}
|
||||
container.Image = a.proxyImage
|
||||
ss.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: headlessSvc.Name,
|
||||
Namespace: a.operatorNamespace,
|
||||
Name: sts.name,
|
||||
Namespace: a.operatorNamespace,
|
||||
OwnerReferences: []metav1.OwnerReference{*sts.clusterConfOwnerRef},
|
||||
}
|
||||
for key, val := range sts.ChildResourceLabels {
|
||||
mak.Set(&ss.ObjectMeta.Labels, key, val)
|
||||
}
|
||||
ss.Spec.ServiceName = headlessSvc.Name
|
||||
ss.Spec.Selector = &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": sts.ParentResourceUID,
|
||||
"app": sts.name,
|
||||
},
|
||||
}
|
||||
mak.Set(&pod.Labels, "app", sts.ParentResourceUID)
|
||||
for key, val := range sts.ChildResourceLabels {
|
||||
pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod
|
||||
}
|
||||
ss.Spec.Replicas = pointer.Int32(int32(len(sts.serviceCIDRs)))
|
||||
mak.Set(&pod.Labels, "app", sts.name)
|
||||
|
||||
// Generic containerboot configuration options.
|
||||
container.Env = append(container.Env,
|
||||
corev1.EnvVar{
|
||||
Name: "TS_KUBE_SECRET",
|
||||
Value: proxySecret,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
// Old tailscaled config key is still used for backwards compatibility.
|
||||
Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH",
|
||||
Value: "/etc/tsconfig/tailscaled",
|
||||
Name: "TS_KUBE_SECRET",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
|
||||
},
|
||||
},
|
||||
// No backwards compat here
|
||||
// corev1.EnvVar{
|
||||
// // Old tailscaled config key is still used for backwards compatibility.
|
||||
// Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH",
|
||||
// Value: "/etc/tsconfig/tailscaled",
|
||||
// },
|
||||
corev1.EnvVar{
|
||||
// New style is in the form of cap-<capability-version>.hujson.
|
||||
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR",
|
||||
Value: "/etc/tsconfig",
|
||||
Value: "/etc/tsconfig/$(POD_NAME)",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "TS_EXPERIMENTAL_SERVICES_CONFIG_PATH",
|
||||
Value: "/etc/$(POD_NAME)",
|
||||
},
|
||||
)
|
||||
if sts.ForwardClusterTrafficViaL7IngressProxy {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS",
|
||||
Value: "true",
|
||||
|
||||
// Mount all tailscaled configs, each Pod only reads the one from
|
||||
// $(POD_NAME) Secret.
|
||||
// There is no way to mount a different Secret for each replica.
|
||||
for i := range len(sts.serviceCIDRs) {
|
||||
// Mount the individual tailscaled state for each replica
|
||||
configVolume := corev1.Volume{
|
||||
Name: fmt.Sprintf("tailscaledconfig-%d", i),
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: fmt.Sprintf("%s-%d", sts.name, i),
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: fmt.Sprintf("tailscaledconfig-%d", i),
|
||||
ReadOnly: true,
|
||||
MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", sts.name, i),
|
||||
})
|
||||
|
||||
servicesConfigV := corev1.Volume{
|
||||
Name: fmt.Sprintf("servicesconfig-%d", i),
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
ConfigMap: &corev1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: corev1.LocalObjectReference{Name: fmt.Sprintf("%s-%d", sts.name, i)},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, servicesConfigV)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: fmt.Sprintf("servicesconfig-%d", i),
|
||||
ReadOnly: true,
|
||||
MountPath: fmt.Sprintf("/etc/%s-%d", sts.name, i),
|
||||
})
|
||||
}
|
||||
|
||||
// Configure containerboot to run tailscaled with a config file read from the state Secret.
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, tsConfigHash)
|
||||
|
||||
configVolume := corev1.Volume{
|
||||
Name: "tailscaledconfig",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume)
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "tailscaledconfig",
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tsconfig",
|
||||
})
|
||||
|
||||
if a.tsFirewallMode != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_DEBUG_FIREWALL_MODE",
|
||||
@ -543,51 +611,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
|
||||
}
|
||||
pod.Spec.PriorityClassName = a.proxyPriorityClassName
|
||||
|
||||
// Ingress/egress proxy configuration options.
|
||||
if sts.ClusterTargetIP != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_DEST_IP",
|
||||
Value: sts.ClusterTargetIP,
|
||||
})
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetClusterIP, sts.ClusterTargetIP)
|
||||
} else if sts.ClusterTargetDNSName != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_EXPERIMENTAL_DEST_DNS_NAME",
|
||||
Value: sts.ClusterTargetDNSName,
|
||||
})
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetClusterDNSName, sts.ClusterTargetDNSName)
|
||||
} else if sts.TailnetTargetIP != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_TAILNET_TARGET_IP",
|
||||
Value: sts.TailnetTargetIP,
|
||||
})
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetTailnetTargetIP, sts.TailnetTargetIP)
|
||||
} else if sts.TailnetTargetFQDN != "" {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_TAILNET_TARGET_FQDN",
|
||||
Value: sts.TailnetTargetFQDN,
|
||||
})
|
||||
mak.Set(&ss.Spec.Template.Annotations, podAnnotationLastSetTailnetTargetFQDN, sts.TailnetTargetFQDN)
|
||||
} else if sts.ServeConfig != nil {
|
||||
container.Env = append(container.Env, corev1.EnvVar{
|
||||
Name: "TS_SERVE_CONFIG",
|
||||
Value: "/etc/tailscaled/serve-config",
|
||||
})
|
||||
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
|
||||
Name: "serve-config",
|
||||
ReadOnly: true,
|
||||
MountPath: "/etc/tailscaled",
|
||||
})
|
||||
pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{
|
||||
Name: "serve-config",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: proxySecret,
|
||||
Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
logger.Debugf("reconciling statefulset %s/%s", ss.GetNamespace(), ss.GetName())
|
||||
if sts.ProxyClass != "" {
|
||||
logger.Debugf("configuring proxy resources with ProxyClass %s", sts.ProxyClass)
|
||||
@ -626,22 +649,22 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
|
||||
if pc == nil || ss == nil {
|
||||
return ss
|
||||
}
|
||||
if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
|
||||
if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||
enableMetrics(ss, pc)
|
||||
} else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||
// TODO (irbekrm): fix this
|
||||
// For Ingress proxies that have been configured with
|
||||
// tailscale.com/experimental-forward-cluster-traffic-via-ingress
|
||||
// annotation, all cluster traffic is forwarded to the
|
||||
// Ingress backend(s).
|
||||
logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||
} else {
|
||||
// TODO (irbekrm): fix this
|
||||
// For egress proxies, currently all cluster traffic is forwarded to the tailnet target.
|
||||
logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||
}
|
||||
}
|
||||
// if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
|
||||
// if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||
// enableMetrics(ss, pc)
|
||||
// } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
|
||||
// // TODO (irbekrm): fix this
|
||||
// // For Ingress proxies that have been configured with
|
||||
// // tailscale.com/experimental-forward-cluster-traffic-via-ingress
|
||||
// // annotation, all cluster traffic is forwarded to the
|
||||
// // Ingress backend(s).
|
||||
// logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||
// } else {
|
||||
// // TODO (irbekrm): fix this
|
||||
// // For egress proxies, currently all cluster traffic is forwarded to the tailnet target.
|
||||
// logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
|
||||
// }
|
||||
// }
|
||||
|
||||
if pc.Spec.StatefulSet == nil {
|
||||
return ss
|
||||
@ -742,29 +765,21 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) {
|
||||
// TODO (irbekrm): remove the legacy config once we no longer need to support
|
||||
// versions older than cap94,
|
||||
// https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies
|
||||
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) {
|
||||
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret, hostname string, serviceCIDR netip.Prefix) (tailscaleConfigs, error) {
|
||||
conf := &ipn.ConfigVAlpha{
|
||||
Version: "alpha0",
|
||||
AcceptDNS: "false",
|
||||
AcceptRoutes: "false", // AcceptRoutes defaults to true
|
||||
Locked: "false",
|
||||
Hostname: &stsC.Hostname,
|
||||
NoStatefulFiltering: "false",
|
||||
Version: "alpha0",
|
||||
AcceptDNS: "false",
|
||||
AcceptRoutes: "false", // AcceptRoutes defaults to true
|
||||
Locked: "false",
|
||||
Hostname: pointer.String(hostname),
|
||||
AdvertiseRoutes: []netip.Prefix{serviceCIDR},
|
||||
|
||||
// TODO: either we switch stateful filter off for all proxies or
|
||||
// we cannot share nodes between ingress and egress proxies
|
||||
// Although this is now off by default?
|
||||
NoStatefulFiltering: "true",
|
||||
}
|
||||
|
||||
// For egress proxies only, we need to ensure that stateful filtering is
|
||||
// not in place so that traffic from cluster can be forwarded via
|
||||
// Tailscale IPs.
|
||||
if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" {
|
||||
conf.NoStatefulFiltering = "true"
|
||||
}
|
||||
if stsC.Connector != nil {
|
||||
routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error calculating routes: %w", err)
|
||||
}
|
||||
conf.AdvertiseRoutes = routes
|
||||
}
|
||||
if newAuthkey != "" {
|
||||
conf.AuthKey = &newAuthkey
|
||||
} else if oldSecret != nil {
|
||||
|
@ -7,12 +7,16 @@
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand/v2"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"go.uber.org/zap"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -20,16 +24,19 @@
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
kubeutils "tailscale.com/k8s-operator"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/net/dns/resolvconffile"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
const (
|
||||
resolvConfPath = "/etc/resolv.conf"
|
||||
defaultClusterDomain = "cluster.local"
|
||||
resolvConfPath = "/etc/resolv.conf"
|
||||
defaultClusterDomain = "cluster.local"
|
||||
serviceDNSNameAnnotation = "tailscale.com/service-dns-name"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
@ -90,13 +97,6 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
} else if err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err)
|
||||
}
|
||||
targetIP := tailnetTargetAnnotation(svc)
|
||||
targetFQDN := svc.Annotations[AnnotationTailnetTargetFQDN]
|
||||
if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) && targetIP == "" && targetFQDN == "" {
|
||||
logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up")
|
||||
return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, a.maybeProvision(ctx, logger, svc)
|
||||
}
|
||||
|
||||
@ -150,145 +150,85 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
|
||||
// This function adds a finalizer to svc, ensuring that we can handle orderly
|
||||
// deprovisioning later.
|
||||
func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
|
||||
// Run for proxy config related validations here as opposed to running
|
||||
// them earlier. This is to prevent cleanup being blocked on a
|
||||
// misconfigured proxy param.
|
||||
if err := a.ssr.validate(); err != nil {
|
||||
msg := fmt.Sprintf("unable to provision proxy resources: invalid config: %v", err)
|
||||
a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDCONFIG", msg)
|
||||
a.logger.Error(msg)
|
||||
return nil
|
||||
}
|
||||
if violations := validateService(svc); len(violations) > 0 {
|
||||
msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", "))
|
||||
a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVCICE", msg)
|
||||
a.logger.Error(msg)
|
||||
// Take a look at the Service
|
||||
// If it is an ingress Service (expose annotation or load balancer)
|
||||
// Add a record to the config map
|
||||
|
||||
// This prototype only looks at ingress Services
|
||||
if !a.shouldExpose(svc) {
|
||||
return nil
|
||||
}
|
||||
|
||||
proxyClass := proxyClassForObject(svc)
|
||||
if proxyClass != "" {
|
||||
if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil {
|
||||
return fmt.Errorf("error verifying ProxyClass for Service: %w", err)
|
||||
} else if !ready {
|
||||
logger.Infof("ProxyClass %s specified for the Service, but is not (yet) Ready, waiting..", proxyClass)
|
||||
return nil
|
||||
}
|
||||
// get clusterconfig
|
||||
// Exactly one ClusterConfig needs to exist, else we don't proceed.
|
||||
ccl := &tsapi.ClusterConfigList{}
|
||||
if err := a.List(ctx, ccl); err != nil {
|
||||
return fmt.Errorf("error listing ClusterConfigs: %w", err)
|
||||
}
|
||||
|
||||
hostname, err := nameForService(svc)
|
||||
if err != nil {
|
||||
return err
|
||||
if len(ccl.Items) < 1 {
|
||||
logger.Info("got %d ClusterConfigs", len(ccl.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
if !slices.Contains(svc.Finalizers, FinalizerName) {
|
||||
// This log line is printed exactly once during initial provisioning,
|
||||
// because once the finalizer is in place this block gets skipped. So,
|
||||
// this is a nice place to tell the operator that the high level,
|
||||
// multi-reconcile operation is underway.
|
||||
logger.Infof("exposing service over tailscale")
|
||||
svc.Finalizers = append(svc.Finalizers, FinalizerName)
|
||||
if err := a.Update(ctx, svc); err != nil {
|
||||
return fmt.Errorf("failed to add finalizer: %w", err)
|
||||
}
|
||||
}
|
||||
crl := childResourceLabels(svc.Name, svc.Namespace, "svc")
|
||||
var tags []string
|
||||
if tstr, ok := svc.Annotations[AnnotationTags]; ok {
|
||||
tags = strings.Split(tstr, ",")
|
||||
}
|
||||
|
||||
sts := &tailscaleSTSConfig{
|
||||
ParentResourceName: svc.Name,
|
||||
ParentResourceUID: string(svc.UID),
|
||||
Hostname: hostname,
|
||||
Tags: tags,
|
||||
ChildResourceLabels: crl,
|
||||
ProxyClass: proxyClass,
|
||||
}
|
||||
|
||||
a.mu.Lock()
|
||||
if a.shouldExposeClusterIP(svc) {
|
||||
sts.ClusterTargetIP = svc.Spec.ClusterIP
|
||||
a.managedIngressProxies.Add(svc.UID)
|
||||
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
|
||||
} else if a.shouldExposeDNSName(svc) {
|
||||
sts.ClusterTargetDNSName = svc.Spec.ExternalName
|
||||
a.managedIngressProxies.Add(svc.UID)
|
||||
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
|
||||
} else if ip := tailnetTargetAnnotation(svc); ip != "" {
|
||||
sts.TailnetTargetIP = ip
|
||||
a.managedEgressProxies.Add(svc.UID)
|
||||
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
|
||||
} else if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" {
|
||||
fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn = fqdn + "."
|
||||
}
|
||||
sts.TailnetTargetFQDN = fqdn
|
||||
a.managedEgressProxies.Add(svc.UID)
|
||||
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
|
||||
}
|
||||
a.mu.Unlock()
|
||||
|
||||
var hsvc *corev1.Service
|
||||
if hsvc, err = a.ssr.Provision(ctx, logger, sts); err != nil {
|
||||
return fmt.Errorf("failed to provision: %w", err)
|
||||
}
|
||||
|
||||
if sts.TailnetTargetIP != "" || sts.TailnetTargetFQDN != "" {
|
||||
clusterDomain := retrieveClusterDomain(a.tsNamespace, logger)
|
||||
headlessSvcName := hsvc.Name + "." + hsvc.Namespace + ".svc." + clusterDomain
|
||||
if svc.Spec.ExternalName != headlessSvcName || svc.Spec.Type != corev1.ServiceTypeExternalName {
|
||||
svc.Spec.ExternalName = headlessSvcName
|
||||
svc.Spec.Selector = nil
|
||||
svc.Spec.Type = corev1.ServiceTypeExternalName
|
||||
if err := a.Update(ctx, svc); err != nil {
|
||||
return fmt.Errorf("failed to update service: %w", err)
|
||||
}
|
||||
}
|
||||
if svc.Spec.ClusterIP == "" {
|
||||
logger.Info("[unexpected] Service has no ClusterIP")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) {
|
||||
logger.Debugf("service is not a LoadBalancer, so not updating ingress")
|
||||
return nil
|
||||
}
|
||||
cc := ccl.Items[0]
|
||||
svcDNSName := a.fqdnsForSvc(svc, cc.Spec.Domain)
|
||||
logger.Debugf("determined DNS name %s", svcDNSName)
|
||||
|
||||
_, tsHost, tsIPs, err := a.ssr.DeviceInfo(ctx, crl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get device ID: %w", err)
|
||||
// Get all ConfigMaps for all proxies
|
||||
cmList := &corev1.ConfigMapList{}
|
||||
if err := a.List(ctx, cmList); err != nil {
|
||||
return fmt.Errorf("error listing proxy ConfigMaps: %w", err)
|
||||
}
|
||||
if tsHost == "" {
|
||||
logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth")
|
||||
// No hostname yet. Wait for the proxy pod to auth.
|
||||
svc.Status.LoadBalancer.Ingress = nil
|
||||
if err := a.Status().Update(ctx, svc); err != nil {
|
||||
return fmt.Errorf("failed to update service status: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debugf("setting ingress to %q, %s", tsHost, strings.Join(tsIPs, ", "))
|
||||
ingress := []corev1.LoadBalancerIngress{
|
||||
{Hostname: tsHost},
|
||||
}
|
||||
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse cluster IP: %w", err)
|
||||
}
|
||||
for _, ip := range tsIPs {
|
||||
addr, err := netip.ParseAddr(ip)
|
||||
if err != nil {
|
||||
for _, cm := range cmList.Items {
|
||||
pcB := cm.BinaryData["proxyConfig"]
|
||||
if len(pcB) == 0 {
|
||||
a.logger.Info("[unexpected] ConfigMap %s does not contain proxyConfig", cm.Name)
|
||||
continue
|
||||
}
|
||||
if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family
|
||||
ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip})
|
||||
pc := &kubeutils.ProxyConfig{}
|
||||
if err := json.Unmarshal(pcB, pc); err != nil {
|
||||
return fmt.Errorf("error unmarshalling proxyconfig for proxy %s: %w", cm.Name, err)
|
||||
}
|
||||
}
|
||||
svc.Status.LoadBalancer.Ingress = ingress
|
||||
if err := a.Status().Update(ctx, svc); err != nil {
|
||||
return fmt.Errorf("failed to update service status: %w", err)
|
||||
// does it have the service name already?
|
||||
if _, ok := pc.Services[svcDNSName]; ok {
|
||||
logger.Debugf("service %s already configured for proxy %s; do nothing", svcDNSName, cm.Name)
|
||||
// TODO: check if the record is correct
|
||||
continue
|
||||
}
|
||||
|
||||
// pick an IP
|
||||
ip := unusedIPv4(pc.ServicesCIDRRange, pc.AddrsToDomain)
|
||||
if pc.AddrsToDomain == nil {
|
||||
pc.AddrsToDomain = &bart.Table[string]{}
|
||||
}
|
||||
pc.AddrsToDomain.Insert(netip.PrefixFrom(ip, ip.BitLen()), svcDNSName)
|
||||
clusterIP, err := netip.ParseAddr(svc.Spec.ClusterIP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshalling Service Cluster IP %v: %w", svc.Spec.ClusterIP, err)
|
||||
}
|
||||
svcConfig := kubeutils.Service{
|
||||
V4ServiceIPs: []netip.Addr{ip},
|
||||
FQDN: svcDNSName,
|
||||
Ingress: &kubeutils.Ingress{
|
||||
Type: "tcp", // currently unused
|
||||
V4Backends: []netip.Addr{clusterIP},
|
||||
},
|
||||
}
|
||||
logger.Info("assigning Service IP %v to %s", ip, svcDNSName)
|
||||
mak.Set(&pc.Services, svcDNSName, svcConfig)
|
||||
pcB, err = json.Marshal(pc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshalling ConfigMap for proxy %s: %w", cm.Name, err)
|
||||
}
|
||||
mak.Set(&cm.BinaryData, "proxyConfig", pcB)
|
||||
if err := a.Update(ctx, &cm); err != nil {
|
||||
return fmt.Errorf("error updating ConfigMap %s: %w", cm.Name, err)
|
||||
}
|
||||
logger.Info("ConfigMap %s updated with a record for %s", cm.Name, svcDNSName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -320,6 +260,12 @@ func (a *ServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool {
	}
	return isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc)
}

func (a *ServiceReconciler) fqdnsForSvc(svc *corev1.Service, clusterDomain string) string {
	if annot := svc.Annotations["tailscale.com/svc-name"]; annot != "" {
		return annot + "." + clusterDomain
	}
	return svc.Name + "-" + svc.Namespace + "." + clusterDomain
}
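For reference, a minimal standalone sketch (not part of this diff) of the naming rule that fqdnsForSvc implements: the tailscale.com/svc-name annotation, when present, replaces the default "<name>-<namespace>" label. The Service name, namespace, and tailnet domain below are invented; the domain value echoes the example used in the ClusterConfig docs in this commit.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fqdnFor mirrors the rule in fqdnsForSvc above, minus the reconciler receiver.
func fqdnFor(svc *corev1.Service, clusterDomain string) string {
	if annot := svc.Annotations["tailscale.com/svc-name"]; annot != "" {
		return annot + "." + clusterDomain
	}
	return svc.Name + "-" + svc.Namespace + "." + clusterDomain
}

func main() {
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "prod"}}
	fmt.Println(fqdnFor(svc, "foo.tailbd97a.ts.net")) // web-prod.foo.tailbd97a.ts.net

	svc.Annotations = map[string]string{"tailscale.com/svc-name": "my-svc"}
	fmt.Println(fqdnFor(svc, "foo.tailbd97a.ts.net")) // my-svc.foo.tailbd97a.ts.net
}
```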

func isTailscaleLoadBalancerService(svc *corev1.Service, isDefaultLoadBalancer bool) bool {
	return svc != nil &&
@ -407,3 +353,39 @@ func clusterDomainFromResolverConf(conf *resolvconffile.Config, namespace string
	logger.Infof("Cluster domain %q extracted from resolver config", probablyClusterDomain)
	return probablyClusterDomain
}

func unusedIPv4(serviceCIDR netip.Prefix, usedIPs *bart.Table[string]) netip.Addr {
	ip := randV4(serviceCIDR)
	if usedIPs == nil {
		return ip // first IP being assigned
	}
	for serviceCIDR.Contains(ip) {
		if !isIPUsed(ip, usedIPs) {
			return ip
		}
		ip = ip.Next()
	}
	return netip.Addr{}
}

func isIPUsed(ip netip.Addr, usedIPs *bart.Table[string]) bool {
	_, ok := usedIPs.Get(ip)
	return ok
}

// randV4 returns a random IPv4 address within the given prefix.
func randV4(maskedPfx netip.Prefix) netip.Addr {
	bits := 32 - maskedPfx.Bits()
	randBits := rand.Uint32N(1 << uint(bits))

	ip4 := maskedPfx.Addr().As4()
	pn := binary.BigEndian.Uint32(ip4[:])
	binary.BigEndian.PutUint32(ip4[:], randBits|pn)
	return netip.AddrFrom4(ip4)
}

// domainForIP returns the domain name assigned to the given IP address and
// whether it was found.
// func domainForIP(ip netip.Addr, serviceRecords ) (string, bool) {
// 	return ps.addrToDomain.Get(ip)
// }
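A small runnable sketch (not part of this diff) of the bart.Table lookups that unusedIPv4 and isIPUsed rely on: the reconciler records each allocated Service IP as a /32 prefix keyed by its MagicDNS name, and a later Get answers "is this address taken, and by whom". The concrete address, the second probe address, and the DNS name are invented; 100.64.2.0/24 echoes the range hardcoded for this prototype in the ClusterConfig types below.

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// Same shape as pc.AddrsToDomain in the reconciler: a routing table of
	// /32 prefixes mapping to the MagicDNS name that owns the address.
	addrsToDomain := &bart.Table[string]{}

	ip := netip.MustParseAddr("100.64.2.7")
	addrsToDomain.Insert(netip.PrefixFrom(ip, ip.BitLen()), "my-svc.foo.tailbd97a.ts.net")

	if domain, ok := addrsToDomain.Get(ip); ok {
		fmt.Printf("%s is already assigned to %s\n", ip, domain)
	}
	if _, ok := addrsToDomain.Get(netip.MustParseAddr("100.64.2.8")); !ok {
		fmt.Println("100.64.2.8 is still free")
	}
}
```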
go.mod
@ -26,7 +26,7 @@ require (
	github.com/dsnet/try v0.0.3
	github.com/evanw/esbuild v0.19.11
	github.com/frankban/quicktest v1.14.6
	github.com/fxamacker/cbor/v2 v2.5.0
	github.com/fxamacker/cbor/v2 v2.6.0
	github.com/gaissmai/bart v0.4.1
	github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0
	github.com/go-logr/zapr v1.3.0
@ -89,14 +89,14 @@ require (
	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
	golang.org/x/crypto v0.21.0
	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
	golang.org/x/mod v0.14.0
	golang.org/x/mod v0.15.0
	golang.org/x/net v0.23.0
	golang.org/x/oauth2 v0.16.0
	golang.org/x/sync v0.6.0
	golang.org/x/sys v0.18.0
	golang.org/x/term v0.18.0
	golang.org/x/time v0.5.0
	golang.org/x/tools v0.17.0
	golang.org/x/tools v0.18.0
	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
	golang.zx2c4.com/wireguard/windows v0.5.3
	gopkg.in/square/go-jose.v2 v2.6.0
@ -106,6 +106,7 @@ require (
	k8s.io/apimachinery v0.29.1
	k8s.io/apiserver v0.29.1
	k8s.io/client-go v0.29.1
	k8s.io/kubernetes v1.30.1
	nhooyr.io/websocket v1.8.10
	sigs.k8s.io/controller-runtime v0.16.2
	sigs.k8s.io/controller-tools v0.13.0
@ -213,7 +214,7 @@ require (
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
@ -370,7 +371,7 @@ require (
	k8s.io/apiextensions-apiserver v0.29.1 // indirect
	k8s.io/component-base v0.29.1 // indirect
	k8s.io/klog/v2 v2.120.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
	mvdan.cc/gofumpt v0.5.0 // indirect
	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
go.sum
@ -290,8 +290,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA=
github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/gaissmai/bart v0.4.1 h1:G1t58voWkNmT47lBDawH5QhtTDsdqRIO+ftq5x4P9Ls=
@ -396,8 +396,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@ -694,10 +694,10 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
@ -960,8 +960,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
@ -1034,8 +1034,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1281,8 +1281,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1445,8 +1445,10 @@ k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw=
k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 h1:m6dl1pkxz3HuE2mP9MUYPCCGyy6IIFlv/vTlLBDxIwA=
k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/kubernetes v1.30.1 h1:XlqS6KslLEA5mQzLK2AJrhr4Z1m8oJfkhHiWJ5lue+I=
k8s.io/kubernetes v1.30.1/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
@ -8,6 +8,8 @@ Packages:

Resource Types:

- [ClusterConfig](#clusterconfig)

- [Connector](#connector)

- [DNSConfig](#dnsconfig)
@ -17,6 +19,154 @@ Resource Types:


## ClusterConfig
<sup><sup>[↩ Parent](#tailscalecomv1alpha1 )</sup></sup>


<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b>apiVersion</b></td>
<td>string</td>
<td>tailscale.com/v1alpha1</td>
<td>true</td>
</tr>
<tr>
<td><b>kind</b></td>
<td>string</td>
<td>ClusterConfig</td>
<td>true</td>
</tr>
<tr>
<td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta">metadata</a></b></td>
<td>object</td>
<td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
<td>true</td>
</tr><tr>
<td><b><a href="#clusterconfigspec">spec</a></b></td>
<td>object</td>
<td>
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status<br/>
</td>
<td>true</td>
</tr><tr>
<td><b><a href="#clusterconfigstatus">status</a></b></td>
<td>object</td>
<td>
ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.<br/>
</td>
<td>false</td>
</tr></tbody>
</table>


### ClusterConfig.spec
<sup><sup>[↩ Parent](#clusterconfig)</sup></sup>

More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b>domain</b></td>
<td>string</td>
<td>
like 'foo.tailbd97a.ts.net' for services like 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?<br/>
</td>
<td>true</td>
</tr></tbody>
</table>


### ClusterConfig.status
<sup><sup>[↩ Parent](#clusterconfig)</sup></sup>

ClusterConfigStatus describes the status of the ClusterConfig. This is set and managed by the Tailscale operator.

<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b><a href="#clusterconfigstatusproxynodesindex">proxyNodes</a></b></td>
<td>[]object</td>
<td>
<br/>
</td>
<td>true</td>
</tr></tbody>
</table>


### ClusterConfig.status.proxyNodes[index]
<sup><sup>[↩ Parent](#clusterconfigstatus)</sup></sup>

<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Description</th>
<th>Required</th>
</tr>
</thead>
<tbody><tr>
<td><b>magicDNSName</b></td>
<td>string</td>
<td>
<br/>
</td>
<td>true</td>
</tr><tr>
<td><b>serviceCIDR</b></td>
<td>string</td>
<td>
<br/>
</td>
<td>true</td>
</tr><tr>
<td><b>tailnetIPs</b></td>
<td>[]string</td>
<td>
<br/>
</td>
<td>true</td>
</tr></tbody>
</table>

## Connector
<sup><sup>[↩ Parent](#tailscalecomv1alpha1 )</sup></sup>
@ -49,7 +49,7 @@ func init() {

// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{})
	scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{}, &ClusterConfig{}, &ClusterConfigList{})

	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
k8s-operator/apis/v1alpha1/types_clusterconfig.go
@ -0,0 +1,67 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
var ClusterConfigKind = "ClusterConfig"
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:resource:scope=Cluster
|
||||
|
||||
type ClusterConfig struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// More info:
|
||||
// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
Spec ClusterConfigSpec `json:"spec"`
|
||||
|
||||
// ClusterConfigStatus describes the status of the ClusterConfig. This
|
||||
// is set and managed by the Tailscale operator.
|
||||
// +optional
|
||||
Status ClusterConfigStatus `json:"status"`
|
||||
}
|
||||
|
||||
type ClusterConfigSpec struct {
|
||||
// like 'foo.tailbd97a.ts.net' for services like
|
||||
// 'my-svc.foo.tailbd97a.ts.net'. Or, should be just 'foo'?
|
||||
Domain string `json:"domain"`
|
||||
|
||||
// TODO: number of proxies + cidr should be under a class- different
|
||||
// classes should allow for different number of nodes
|
||||
|
||||
// Hardcoded to 4 for this prototype
|
||||
// NumProxies int `json:"numProxies"`
|
||||
|
||||
// Hardcoded to 100.64.2.0/24 for this prototype.
|
||||
// Question: is there a better way for users to allocate an unused CIDR
|
||||
// than forcing IPs for all other nodes to a different CIDR via
|
||||
// https://tailscale.com/kb/1304/ip-pool?
|
||||
// CIDRv4 string `json:"cidrv4"`
|
||||
|
||||
// TODO: CIDRv6
|
||||
}
|
||||
|
||||
type ClusterConfigStatus struct {
|
||||
ProxyNodes []ProxyNode `json:"proxyNodes"`
|
||||
}
|
||||
|
||||
type ProxyNode struct {
|
||||
MagicDNSName string `json:"magicDNSName"`
|
||||
TailnetIPs []string `json:"tailnetIPs"`
|
||||
ServiceCIDR string `json:"serviceCIDR"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
type ClusterConfigList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []ClusterConfig `json:"items"`
|
||||
}
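To make the shape of this API concrete, here is a hedged sketch (not part of the diff) that builds a ClusterConfig in Go and prints it as JSON. The object name, proxy MagicDNS name, and tailnet IP are invented; the domain and service CIDR echo values used elsewhere in this prototype, and in a real cluster the status would be filled in by the operator, not by hand.

```go
package main

import (
	"encoding/json"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

func main() {
	cc := tsapi.ClusterConfig{
		TypeMeta:   metav1.TypeMeta{APIVersion: "tailscale.com/v1alpha1", Kind: "ClusterConfig"},
		ObjectMeta: metav1.ObjectMeta{Name: "prototype"},
		Spec:       tsapi.ClusterConfigSpec{Domain: "foo.tailbd97a.ts.net"},
		Status: tsapi.ClusterConfigStatus{
			// Normally written by the operator: one entry per proxy node,
			// each with its own chunk of the services address space.
			ProxyNodes: []tsapi.ProxyNode{{
				MagicDNSName: "proxy-0.tailbd97a.ts.net",
				TailnetIPs:   []string{"100.80.0.1"},
				ServiceCIDR:  "100.64.2.0/24",
			}},
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(cc)
}
```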
@ -12,6 +12,102 @@
	"k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig.
func (in *ClusterConfig) DeepCopy() *ClusterConfig {
	if in == nil {
		return nil
	}
	out := new(ClusterConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConfigList) DeepCopyInto(out *ClusterConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ClusterConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigList.
func (in *ClusterConfigList) DeepCopy() *ClusterConfigList {
	if in == nil {
		return nil
	}
	out := new(ClusterConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConfigSpec) DeepCopyInto(out *ClusterConfigSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigSpec.
func (in *ClusterConfigSpec) DeepCopy() *ClusterConfigSpec {
	if in == nil {
		return nil
	}
	out := new(ClusterConfigSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConfigStatus) DeepCopyInto(out *ClusterConfigStatus) {
	*out = *in
	if in.ProxyNodes != nil {
		in, out := &in.ProxyNodes, &out.ProxyNodes
		*out = make([]ProxyNode, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigStatus.
func (in *ClusterConfigStatus) DeepCopy() *ClusterConfigStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterConfigStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Connector) DeepCopyInto(out *Connector) {
	*out = *in
@ -523,6 +619,26 @@ func (in *ProxyClassStatus) DeepCopy() *ProxyClassStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyNode) DeepCopyInto(out *ProxyNode) {
	*out = *in
	if in.TailnetIPs != nil {
		in, out := &in.TailnetIPs, &out.TailnetIPs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyNode.
func (in *ProxyNode) DeepCopy() *ProxyNode {
	if in == nil {
		return nil
	}
	out := new(ProxyNode)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Routes) DeepCopyInto(out *Routes) {
	{
@ -7,10 +7,14 @@

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
	"tailscale.com/tailcfg"
)

// TODO: move all this to ./kube

const (
	Alpha1Version = "v1alpha1"

@ -24,8 +28,17 @@ type Records struct {
	// k8s-nameserver must verify that it knows how to parse a given
	// version.
	Version string `json:"version"`

	// This will go- this will only contain ingress/egress destinations, not what
	// service IPs this is assigned to.

	// IP4 contains a mapping of DNS names to IPv4 address(es).
	IP4 map[string][]string `json:"ip4"`
	// TODO: probably don't need this here
	AddrsToDomain *bart.Table[string] `json:"addrsToDomain"`
	// Probably should not be a string so that don't need to parse twice
	// TODO: remove from here
	DNSAddr string `json:"dnsAddr"`
}

// TailscaledConfigFileNameForCap returns a tailscaled config file name in
@ -47,3 +60,25 @@ func CapVerFromFileName(name string) (tailcfg.CapabilityVersion, error) {
	_, err := fmt.Sscanf(name, "cap-%d.hujson", &cap)
	return cap, err
}

type ProxyConfig struct {
	// Maybe we don't need to put this one here- it's just convenient for
	// the services reconciler to read it from here.
	ServicesCIDRRange netip.Prefix       `json:"serviceCIDR,omitempty"`
	Services          map[string]Service `json:"services,omitempty"`

	// For lookup convenience
	AddrsToDomain *bart.Table[string] `json:"addrsToDomain,omitempty"`
}

type Service struct {
	FQDN         string       `json:"fqdn,omitempty"`
	V4ServiceIPs []netip.Addr `json:"vService4ips"`
	Ingress      *Ingress     `json:"ingress"`
}

type Ingress struct {
	Type string `json:"type"` // tcp or http
	// type?
	V4Backends []netip.Addr `json:"v4Backends"`
}
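As an illustration of what the services reconciler above stores under a proxy ConfigMap's "proxyConfig" key, here is a minimal sketch (not part of the diff) that builds a ProxyConfig and marshals it the same way. The service name, Service IP, and cluster-IP backend are invented, and the kubeutils alias is assumed to resolve to this package (tailscale.com/k8s-operator), as it does in the operator code above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"

	kubeutils "tailscale.com/k8s-operator"
)

func main() {
	pc := kubeutils.ProxyConfig{
		// The CIDR each proxy may allocate Service IPs from.
		ServicesCIDRRange: netip.MustParsePrefix("100.64.2.0/24"),
		Services: map[string]kubeutils.Service{
			"my-svc.foo.tailbd97a.ts.net": {
				FQDN:         "my-svc.foo.tailbd97a.ts.net",
				V4ServiceIPs: []netip.Addr{netip.MustParseAddr("100.64.2.7")},
				Ingress: &kubeutils.Ingress{
					Type:       "tcp",
					V4Backends: []netip.Addr{netip.MustParseAddr("10.0.153.31")},
				},
			},
		},
	}
	b, err := json.MarshalIndent(pc, "", "  ")
	if err != nil {
		panic(err)
	}
	// Roughly the JSON the reconciler writes into cm.BinaryData["proxyConfig"].
	fmt.Println(string(b))
}
```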