mirror of
https://github.com/tailscale/tailscale.git
synced 2025-12-30 07:54:18 +00:00
WIP: allow cluster Pods to route to any tailnet service
Signed-off-by: Irbe Krumina <irbe@tailscale.com>
This commit is contained in:
48
cmd/containerboot/egresscoalesce.md
Normal file
48
cmd/containerboot/egresscoalesce.md
Normal file
@@ -0,0 +1,48 @@
|
||||
This is a prototype for how to make any tailnet service accessible from cluster without creating individual egress Services for each.
|
||||
|
||||
## To try it out
|
||||
|
||||
- create a reusable auth key and update ./egressc.yaml with it
|
||||
|
||||
- kubectl apply -f ./egressc.yaml
|
||||
|
||||
- update kube-dns/CoreDNS to route all traffic for ts.net to 100.100.100.100, e.g.
|
||||
|
||||
```
|
||||
data:
|
||||
stubDomains: |
|
||||
{
|
||||
"ts.net": [
|
||||
"100.100.100.100"
|
||||
]
|
||||
}
|
||||
```
|
||||
^ for kube-dns
|
||||
|
||||
See CoreDNS example in https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress#expose-a-tailnet-https-service-to-your-cluster-workloads
|
||||
|
||||
- any Pod in cluster should now be able to access any tailnet service by ts.net DNS name
|
||||
|
||||
## Caveats
|
||||
|
||||
!!! I have only tested this on GKE with kube-dns
|
||||
|
||||
Also:
|
||||
|
||||
- a Tailscale DaemonSet is needed, which will likely make resource consumption too high for many-node clusters
|
||||
- only works on hosts that support iptables
|
||||
- will not work with GCP CloudDNS or any other DNS service that is outside cluster/cannot route to Pods
|
||||
|
||||
## How it works:
|
||||
|
||||
- creates a DaemonSet that runs Tailscale (NOT on host network)
|
||||
|
||||
- the DaemonSet has a single container that runs Tailscale and an init container
|
||||
|
||||
- the init container for each DaemonSet's Pod creates a Job that runs once on the Pod's node and sets up a route that directs 100.64.0.0/10 to this Pod
|
||||
|
||||
- the container runs updated containerboot that runs ARP resolver in a loop and responds to ARP requests for IPs in 100.64.0.0/10 range with the Pod's MAC address
|
||||
|
||||
## Next steps:
|
||||
|
||||
- try to figure out if the same can be achieved with a smaller number of Tailscale Pods. The problem there is how to set up routing to Pods across hosts
|
||||
@@ -111,6 +111,7 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mdlayher/arp"
|
||||
"golang.org/x/sys/unix"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/ipn"
|
||||
@@ -336,6 +337,11 @@ authLoop:
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.EgressRange != "" {
|
||||
log.Printf("egress range is set")
|
||||
go runARP(cfg.EgressRange)
|
||||
}
|
||||
|
||||
// Setup for proxies that are configured to proxy to a target specified
|
||||
// by a DNS name (TS_EXPERIMENTAL_DEST_DNS_NAME).
|
||||
const defaultCheckPeriod = time.Minute * 10 // how often to check what IPs the DNS name resolves to
|
||||
@@ -517,6 +523,30 @@ runLoop:
|
||||
log.Fatalf("installing egress proxy rules: %v", err)
|
||||
}
|
||||
}
|
||||
if cfg.EgressRange != "" && ipsHaveChanged && len(addrs) != 0 {
|
||||
log.Printf("Installing SNAT for %s", cfg.EgressRange)
|
||||
dst, err := netip.ParsePrefix(cfg.EgressRange)
|
||||
if err != nil {
|
||||
log.Fatalf("error parsing dst range %v", err)
|
||||
}
|
||||
var local netip.Addr
|
||||
for _, pfx := range addrs {
|
||||
if !pfx.IsSingleIP() {
|
||||
continue
|
||||
}
|
||||
if pfx.Addr().Is4() != dst.Addr().Is4() {
|
||||
continue
|
||||
}
|
||||
local = pfx.Addr()
|
||||
break
|
||||
}
|
||||
if !local.IsValid() {
|
||||
log.Fatalf("no tailscale IP matching family of %s found in %v", dst, addrs)
|
||||
}
|
||||
if err := nfr.EnsureSNATForRange(local, dst); err != nil {
|
||||
log.Fatalf("installing egress proxy rules: %v", err)
|
||||
}
|
||||
}
|
||||
// If this is a L7 cluster ingress proxy (set up
|
||||
// by Kubernetes operator) and proxying of
|
||||
// cluster traffic to the ingress target is
|
||||
@@ -744,3 +774,56 @@ func tailscaledConfigFilePath() string {
|
||||
log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion)
|
||||
return path.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer))
|
||||
}
|
||||
|
||||
func runARP(r string) {
|
||||
log.Printf("running ARP client")
|
||||
ifs, err := net.Interfaces()
|
||||
if err != nil {
|
||||
log.Fatalf("error listing interfaces: %v", err)
|
||||
}
|
||||
advertizedRange, err := netip.ParsePrefix(r)
|
||||
if err != nil {
|
||||
log.Fatalf("error parsing range %s: %v", r, err)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("error parsing IP: %v", err)
|
||||
}
|
||||
var veth net.Interface
|
||||
for _, i := range ifs {
|
||||
log.Printf("looking at interface %s", i.Name)
|
||||
if strings.EqualFold(i.Name, "lo") || strings.EqualFold(i.Name, "tailscale0") {
|
||||
continue
|
||||
}
|
||||
log.Printf("picked interface %v", i.Name)
|
||||
if err != nil {
|
||||
log.Fatalf("error retrieving interface addrs: %v", err)
|
||||
}
|
||||
veth = i
|
||||
break
|
||||
}
|
||||
client, err := arp.Dial(&veth)
|
||||
if err != nil {
|
||||
log.Fatalf("error creating ARP client: %v", err)
|
||||
}
|
||||
|
||||
for {
|
||||
log.Printf("Waiting for ARP packets")
|
||||
packet, _, err := client.Read()
|
||||
if err != nil {
|
||||
log.Fatalf("error reading ARP packets: %v", err)
|
||||
}
|
||||
log.Printf("got an ARP packet for operation %v address %v from %s", packet.Operation.String(), packet.TargetIP.String(), packet.SenderIP.String())
|
||||
if packet.Operation != arp.OperationRequest {
|
||||
log.Printf("not an ARP request")
|
||||
continue
|
||||
}
|
||||
// if !advertizedRange.Contains(packet.TargetIP) && !strings.EqualFold(packet.TargetIP.String(), ipAddr.String()) {
|
||||
if !advertizedRange.Contains(packet.TargetIP) {
|
||||
log.Printf("not in range")
|
||||
continue
|
||||
}
|
||||
if err := client.Reply(packet, client.HardwareAddr(), packet.TargetIP); err != nil {
|
||||
log.Printf("error replying to ARP request: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,6 +69,7 @@ type settings struct {
|
||||
PodIPv6 string
|
||||
HealthCheckAddrPort string
|
||||
EgressSvcsCfgPath string
|
||||
EgressRange string
|
||||
}
|
||||
|
||||
func configFromEnv() (*settings, error) {
|
||||
@@ -99,6 +100,7 @@ func configFromEnv() (*settings, error) {
|
||||
EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false),
|
||||
HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""),
|
||||
EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""),
|
||||
EgressRange: defaultEnv("TS_EGRESS_RANGE", ""),
|
||||
}
|
||||
podIPs, ok := os.LookupEnv("POD_IPS")
|
||||
if ok {
|
||||
@@ -263,7 +265,7 @@ func isOneStepConfig(cfg *settings) bool {
|
||||
// as an L3 proxy, proxying to an endpoint provided via one of the config env
|
||||
// vars.
|
||||
func isL3Proxy(cfg *settings) bool {
|
||||
return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressSvcsCfgPath != ""
|
||||
return cfg.EgressRange != "" || cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressSvcsCfgPath != ""
|
||||
}
|
||||
|
||||
// hasKubeStateStore returns true if the state must be stored in a Kubernetes
|
||||
|
||||
Reference in New Issue
Block a user