// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package netstack wires up gVisor's netstack into Tailscale.
package netstack

import (
	"bytes"
	"context"
	"errors"
	"expvar"
	"fmt"
	"io"
	"log"
	"math"
	"net"
	"net/netip"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"gvisor.dev/gvisor/pkg/buffer"
	"gvisor.dev/gvisor/pkg/refs"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/link/channel"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
	"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
	"gvisor.dev/gvisor/pkg/waiter"

	"tailscale.com/drive"
	"tailscale.com/envknob"
	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/metrics"
	"tailscale.com/net/dns"
	"tailscale.com/net/netaddr"
	"tailscale.com/net/packet"
	"tailscale.com/net/tsaddr"
	"tailscale.com/net/tsdial"
	"tailscale.com/net/tstun"
	"tailscale.com/proxymap"
	"tailscale.com/syncs"
	"tailscale.com/tailcfg"
	"tailscale.com/types/ipproto"
	"tailscale.com/types/logger"
	"tailscale.com/types/netmap"
	"tailscale.com/types/nettype"
	"tailscale.com/util/clientmetric"
	"tailscale.com/version"
	"tailscale.com/wgengine"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/magicsock"
)

const debugPackets = false

// If non-zero, these override the values returned from the corresponding
// functions, below.
var (
	maxInFlightConnectionAttemptsForTest          int
	maxInFlightConnectionAttemptsPerClientForTest int
)

// maxInFlightConnectionAttempts returns the global number of in-flight
// connection attempts that we allow for a single netstack Impl. Any new
// forwarded TCP connections that are opened after the limit has been hit are
// rejected until the number of in-flight connections drops below the limit
// again.
//
// Each in-flight connection attempt is a new goroutine and an open TCP
// connection, so we want to ensure that we don't allow an unbounded number of
// connections.
func maxInFlightConnectionAttempts() int {
	if n := maxInFlightConnectionAttemptsForTest; n > 0 {
		return n
	}

	if version.IsMobile() {
		return 1024 // previous global value
	}
	switch version.OS() {
	case "linux":
		// On the assumption that most subnet routers deployed in
		// production are running on Linux, we return a higher value.
		//
		// TODO(andrew-d): tune this based on the amount of system
		// memory instead of a fixed limit.
		return 8192
	default:
		// On all other platforms, return a reasonably high value that
		// most users won't hit.
		return 2048
	}
}

// maxInFlightConnectionAttemptsPerClient is the same as
// maxInFlightConnectionAttempts, but applies on a per-client basis
// (i.e. keyed by the remote Tailscale IP).
func maxInFlightConnectionAttemptsPerClient() int {
	if n := maxInFlightConnectionAttemptsPerClientForTest; n > 0 {
		return n
	}

	// For now, allow each individual client at most 2/3rds of the global
	// limit. On all platforms except mobile, this won't be a visible
	// change for users since this limit was added at the same time as we
	// bumped the global limit, above.
	return maxInFlightConnectionAttempts() * 2 / 3
}
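
// For reference, with the defaults above the per-client cap works out to 2/3
// of the global limit (integer division): 8192*2/3 = 5461 on Linux,
// 2048*2/3 = 1365 on other non-mobile platforms, and 1024*2/3 = 682 on mobile.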

var debugNetstack = envknob.RegisterBool("TS_DEBUG_NETSTACK")

var (
	serviceIP   = tsaddr.TailscaleServiceIP()
	serviceIPv6 = tsaddr.TailscaleServiceIPv6()
)

func init() {
	mode := envknob.String("TS_DEBUG_NETSTACK_LEAK_MODE")
	if mode == "" {
		return
	}
	var lm refs.LeakMode
	if err := lm.Set(mode); err != nil {
		panic(err)
	}
	refs.SetLeakMode(lm)
}

// Impl contains the state for the netstack implementation,
// and implements wgengine.FakeImpl to act as a userspace network
// stack when Tailscale is running in fake mode.
type Impl struct {
	// GetTCPHandlerForFlow conditionally handles an incoming TCP flow for the
	// provided (src/port, dst/port) 4-tuple.
	//
	// A nil value is equivalent to a func returning (nil, false).
	//
	// If func returns intercept=false, the default forwarding behavior (if
	// ProcessLocalIPs and/or ProcessSubnets is set) takes place.
	//
	// When intercept=true, the behavior depends on whether the returned handler
	// is non-nil: if nil, the connection is rejected. If non-nil, handler takes
	// over the TCP conn.
	GetTCPHandlerForFlow func(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool)

	// GetUDPHandlerForFlow conditionally handles an incoming UDP flow for the
	// provided (src/port, dst/port) 4-tuple.
	//
	// A nil value is equivalent to a func returning (nil, false).
	//
	// If func returns intercept=false, the default forwarding behavior (if
	// ProcessLocalIPs and/or ProcessSubnets is set) takes place.
	//
	// When intercept=true, the behavior depends on whether the returned handler
	// is non-nil: if nil, the connection is rejected. If non-nil, handler takes
	// over the UDP flow.
	GetUDPHandlerForFlow func(src, dst netip.AddrPort) (handler func(nettype.ConnPacketConn), intercept bool)

	// ProcessLocalIPs is whether netstack should handle incoming
	// traffic directed at the Node.Addresses (local IPs).
	// It can only be set before calling Start.
	ProcessLocalIPs bool

	// ProcessSubnets is whether netstack should handle incoming
	// traffic destined to non-local IPs (i.e. whether it should
	// be a subnet router).
	// It can only be set before calling Start.
	ProcessSubnets bool

	ipstack       *stack.Stack
	linkEP        *channel.Endpoint
	tundev        *tstun.Wrapper
	e             wgengine.Engine
	pm            *proxymap.Mapper
	mc            *magicsock.Conn
	logf          logger.Logf
	dialer        *tsdial.Dialer
	ctx           context.Context          // alive until Close
	ctxCancel     context.CancelFunc       // called on Close
	lb            *ipnlocal.LocalBackend   // or nil
	dns           *dns.Manager
	driveForLocal drive.FileSystemForLocal // or nil

	peerapiPort4Atomic atomic.Uint32 // uint16 port number for IPv4 peerapi
	peerapiPort6Atomic atomic.Uint32 // uint16 port number for IPv6 peerapi

	// atomicIsLocalIPFunc holds a func that reports whether an IP
	// is a local (non-subnet) Tailscale IP address of this
	// machine. It's always a non-nil func. It's changed on netmap
	// updates.
	atomicIsLocalIPFunc syncs.AtomicValue[func(netip.Addr) bool]

	// forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style
	// function that is used to make outgoing connections when forwarding a
	// TCP connection to another host (e.g. in subnet router mode).
	//
	// This is currently only used in tests.
	forwardDialFunc func(context.Context, string, string) (net.Conn, error)

	// forwardInFlightPerClientDropped is a metric that tracks how many
	// in-flight TCP forward requests were dropped due to the per-client
	// limit.
	forwardInFlightPerClientDropped expvar.Int

	mu sync.Mutex
	// connsOpenBySubnetIP keeps track of the number of connections open
	// for each subnet IP temporarily registered on netstack for active
	// TCP connections, so they can be unregistered when connections are
	// closed.
	connsOpenBySubnetIP map[netip.Addr]int
	// connsInFlightByClient keeps track of the number of in-flight
	// connections by the client ("Tailscale") IP. This is used to apply a
	// per-client limit on in-flight connections that's smaller than the
	// global limit, preventing a misbehaving client from starving the
	// global limit.
	connsInFlightByClient map[netip.Addr]int
	// packetsInFlight tracks whether we're already handling a packet by
	// the given endpoint ID; clients can send repeated SYN packets while
	// trying to establish a connection (and while we're dialing the
	// upstream address). If we don't deduplicate based on the endpoint,
	// each SYN retransmit results in us incrementing
	// connsInFlightByClient, and not decrementing it because the
	// underlying TCP forwarder returns 'true' to indicate that the packet
	// is handled but never actually launches our acceptTCP function.
	//
	// This mimics the 'inFlight' map in the TCP forwarder; it's
	// unfortunate that we have to track this all twice, but thankfully the
	// map only holds pending (in-flight) packets, and it's reasonably cheap.
	packetsInFlight map[stack.TransportEndpointID]struct{}
}
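
// exampleInstallTCPIntercept is a minimal, illustrative sketch (not part of
// the real wiring in this package) of how a caller could install
// GetTCPHandlerForFlow: flows to port 22 are taken over by the hypothetical
// handleSSH handler, and every other flow falls back to the default
// forwarding behavior.
func exampleInstallTCPIntercept(ns *Impl, handleSSH func(net.Conn)) {
	ns.GetTCPHandlerForFlow = func(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool) {
		if dst.Port() == 22 {
			return handleSSH, true // handler takes over the TCP conn
		}
		return nil, false // not intercepted; default forwarding (if enabled) applies
	}
}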

const nicID = 1

// maxUDPPacketSize is the maximum size of a UDP packet we copy in
// startPacketCopy when relaying UDP packets. The user can configure
// the tailscale MTU to anything up to this size so we can potentially
// have a UDP packet as big as the MTU.
const maxUDPPacketSize = tstun.MaxPacketSize

// Create creates and populates a new Impl.
func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) {
	if mc == nil {
		return nil, errors.New("nil magicsock.Conn")
	}
	if tundev == nil {
		return nil, errors.New("nil tundev")
	}
	if logf == nil {
		return nil, errors.New("nil logger")
	}
	if e == nil {
		return nil, errors.New("nil Engine")
	}
	if pm == nil {
		return nil, errors.New("nil proxymap.Mapper")
	}
	if dialer == nil {
		return nil, errors.New("nil Dialer")
	}

	ipstack := stack.New(stack.Options{
		NetworkProtocols:   []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol, icmp.NewProtocol4, icmp.NewProtocol6},
	})
	sackEnabledOpt := tcpip.TCPSACKEnabled(true) // TCP SACK is disabled by default
	tcpipErr := ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &sackEnabledOpt)
	if tcpipErr != nil {
		return nil, fmt.Errorf("could not enable TCP SACK: %v", tcpipErr)
	}
	if runtime.GOOS == "windows" {
		// See https://github.com/tailscale/tailscale/issues/9707
		// Windows w/RACK performs poorly. ACKs do not appear to be handled in a
		// timely manner, leading to spurious retransmissions and a reduced
		// congestion window.
		tcpRecoveryOpt := tcpip.TCPRecovery(0)
		tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt)
		if tcpipErr != nil {
			return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr)
		}
	}
	linkEP := channel.New(512, uint32(tstun.DefaultTUNMTU()), "")
	if tcpipProblem := ipstack.CreateNIC(nicID, linkEP); tcpipProblem != nil {
		return nil, fmt.Errorf("could not create netstack NIC: %v", tcpipProblem)
	}

	// By default the netstack NIC will only accept packets for the IPs
	// registered to it. Since in some cases we dynamically register IPs
	// based on the packets that arrive, the NIC needs to accept all
	// incoming packets. The NIC won't receive anything it isn't meant to
	// since WireGuard will only send us packets that are meant for us.
	ipstack.SetPromiscuousMode(nicID, true)

	// Add IPv4 and IPv6 default routes, so all incoming packets from the Tailscale side
	// are handled by the one fake NIC we use.
	ipv4Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 4)), tcpip.MaskFromBytes(make([]byte, 4)))
	if err != nil {
		return nil, fmt.Errorf("could not create IPv4 subnet: %v", err)
	}
	ipv6Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 16)), tcpip.MaskFromBytes(make([]byte, 16)))
	if err != nil {
		return nil, fmt.Errorf("could not create IPv6 subnet: %v", err)
	}
	ipstack.SetRouteTable([]tcpip.Route{
		{
			Destination: ipv4Subnet,
			NIC:         nicID,
		},
		{
			Destination: ipv6Subnet,
			NIC:         nicID,
		},
	})

	ns := &Impl{
		logf:                  logf,
		ipstack:               ipstack,
		linkEP:                linkEP,
		tundev:                tundev,
		e:                     e,
		pm:                    pm,
		mc:                    mc,
		dialer:                dialer,
		connsOpenBySubnetIP:   make(map[netip.Addr]int),
		connsInFlightByClient: make(map[netip.Addr]int),
		packetsInFlight:       make(map[stack.TransportEndpointID]struct{}),
		dns:                   dns,
		driveForLocal:         driveForLocal,
	}
	ns.ctx, ns.ctxCancel = context.WithCancel(context.Background())
	ns.atomicIsLocalIPFunc.Store(tsaddr.FalseContainsIPFunc())
	ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound
	ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets
	stacksForMetrics.Store(ns, struct{}{})
	return ns, nil
}

func (ns *Impl) Close() error {
	stacksForMetrics.Delete(ns)
	ns.ctxCancel()
	ns.ipstack.Close()
	ns.ipstack.Wait()
	return nil
}

// A single process might have several netstacks running at the same time.
// Exported clientmetric counters report the sum of the counters across all of them.
var stacksForMetrics syncs.Map[*Impl, struct{}]

func init() {
	// Please take care to avoid exporting clientmetrics with the same metric
	// names as the ones used by Impl.ExpVar. Both get exposed via the same HTTP
	// endpoint, and name collisions will result in Prometheus scraping errors.
	clientmetric.NewCounterFunc("netstack_tcp_forward_dropped_attempts", func() int64 {
		var total uint64
		stacksForMetrics.Range(func(ns *Impl, _ struct{}) bool {
			delta := ns.ipstack.Stats().TCP.ForwardMaxInFlightDrop.Value()
			if total+delta > math.MaxInt64 {
				total = math.MaxInt64
				return false
			}
			total += delta
			return true
		})
		return int64(total)
	})
}

type protocolHandlerFunc func(stack.TransportEndpointID, *stack.PacketBuffer) bool
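
// A protocolHandlerFunc has the same shape as the HandlePacket method on
// gVisor's tcp/udp forwarders. As a rough sketch (the actual wiring happens
// elsewhere in this package, and the tcpFwd/udpFwd forwarder values here are
// illustrative), the wrappers below are intended to be used like:
//
//	ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber,
//		ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket))
//	ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber,
//		ns.wrapUDPProtocolHandler(udpFwd.HandlePacket))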
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
|
|
|
|
|
// wrapUDPProtocolHandler wraps the protocol handler we pass to netstack for UDP.
|
|
|
|
|
func (ns *Impl) wrapUDPProtocolHandler(h protocolHandlerFunc) protocolHandlerFunc {
|
2024-03-07 00:56:02 +00:00
|
|
|
|
return func(tei stack.TransportEndpointID, pb *stack.PacketBuffer) bool {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
addr := tei.LocalAddress
|
2023-09-07 09:41:56 +00:00
|
|
|
|
ip, ok := netip.AddrFromSlice(addr.AsSlice())
|
2021-07-21 15:38:13 +00:00
|
|
|
|
if !ok {
|
|
|
|
|
ns.logf("netstack: could not parse local address for incoming connection")
|
|
|
|
|
return false
|
|
|
|
|
}
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
|
|
|
|
|
// Dynamically reconfigure ns's subnet addresses as needed for
|
|
|
|
|
// outbound traffic.
|
2022-08-02 20:38:11 +00:00
|
|
|
|
ip = ip.Unmap()
|
2021-07-21 15:38:13 +00:00
|
|
|
|
if !ns.isLocalIP(ip) {
|
|
|
|
|
ns.addSubnetAddress(ip)
|
|
|
|
|
}
|
|
|
|
|
return h(tei, pb)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
var (
|
|
|
|
|
metricPerClientForwardLimit = clientmetric.NewCounter("netstack_tcp_forward_dropped_attempts_per_client")
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// wrapTCPProtocolHandler wraps the protocol handler we pass to netstack for TCP.
|
|
|
|
|
func (ns *Impl) wrapTCPProtocolHandler(h protocolHandlerFunc) protocolHandlerFunc {
|
|
|
|
|
// 'handled' is whether the packet should be accepted by netstack; if
|
|
|
|
|
// true, then the TCP connection is accepted by the transport layer and
|
|
|
|
|
// passes through our acceptTCP handler/etc. If false, then the packet
|
|
|
|
|
// is dropped and the TCP connection is rejected (typically with an
|
|
|
|
|
// ICMP Port Unreachable or ICMP Protocol Unreachable message).
|
2024-03-07 00:56:02 +00:00
|
|
|
|
return func(tei stack.TransportEndpointID, pb *stack.PacketBuffer) (handled bool) {
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
localIP, ok := netip.AddrFromSlice(tei.LocalAddress.AsSlice())
|
|
|
|
|
if !ok {
|
|
|
|
|
ns.logf("netstack: could not parse local address for incoming connection")
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
localIP = localIP.Unmap()
|
|
|
|
|
|
|
|
|
|
remoteIP, ok := netip.AddrFromSlice(tei.RemoteAddress.AsSlice())
|
|
|
|
|
if !ok {
|
|
|
|
|
ns.logf("netstack: could not parse remote address for incoming connection")
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If we have too many in-flight connections for this client, abort
|
|
|
|
|
// early and don't open a new one.
|
|
|
|
|
//
|
|
|
|
|
// NOTE: the counter is decremented in
|
|
|
|
|
// decrementInFlightTCPForward, called from the acceptTCP
|
|
|
|
|
// function, below.
|
2024-02-29 04:21:31 +00:00
|
|
|
|
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
ns.mu.Lock()
|
2024-02-29 04:21:31 +00:00
|
|
|
|
if _, ok := ns.packetsInFlight[tei]; ok {
|
|
|
|
|
// We're already handling this packet; just bail early
|
|
|
|
|
// (this is also what would happen in the TCP
|
|
|
|
|
// forwarder).
|
|
|
|
|
ns.mu.Unlock()
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check the per-client limit.
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
inFlight := ns.connsInFlightByClient[remoteIP]
|
|
|
|
|
tooManyInFlight := inFlight >= maxInFlightConnectionAttemptsPerClient()
|
|
|
|
|
if !tooManyInFlight {
|
|
|
|
|
ns.connsInFlightByClient[remoteIP]++
|
|
|
|
|
}
|
2024-02-29 04:21:31 +00:00
|
|
|
|
|
|
|
|
|
// We're handling this packet now; see the comment on the
|
|
|
|
|
// packetsInFlight field for more details.
|
|
|
|
|
ns.packetsInFlight[tei] = struct{}{}
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
ns.mu.Unlock()
|
2024-02-29 04:21:31 +00:00
|
|
|
|
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
if debugNetstack() {
|
|
|
|
|
ns.logf("[v2] netstack: in-flight connections for client %v: %d", remoteIP, inFlight)
|
|
|
|
|
}
|
|
|
|
|
if tooManyInFlight {
|
|
|
|
|
ns.logf("netstack: ignoring a new TCP connection from %v to %v because the client already has %d in-flight connections", localIP, remoteIP, inFlight)
|
|
|
|
|
metricPerClientForwardLimit.Add(1)
|
|
|
|
|
ns.forwardInFlightPerClientDropped.Add(1)
|
|
|
|
|
return false // unhandled
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// On return, if this packet isn't handled by the inner handler
|
|
|
|
|
// we're wrapping (`h`), we need to decrement the per-client
|
2024-02-29 04:21:31 +00:00
|
|
|
|
// in-flight count and remove the ID from our tracking map.
|
|
|
|
|
// This can happen if the underlying forwarder's limit has been
|
|
|
|
|
// reached, at which point it will return false to indicate
|
|
|
|
|
// that it's not handling the packet, and it will not run
|
|
|
|
|
// acceptTCP. If we don't decrement here, then we would
|
|
|
|
|
// eventually increment the per-client counter up to the limit
|
|
|
|
|
// and never decrement because we'd never hit the codepath in
|
|
|
|
|
// acceptTCP, below, or just drop all packets from the same
|
|
|
|
|
// endpoint due to the packetsInFlight check.
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
		defer func() {
			if !handled {
				ns.mu.Lock()
				delete(ns.packetsInFlight, tei)
				ns.connsInFlightByClient[remoteIP]--
				new := ns.connsInFlightByClient[remoteIP]
				ns.mu.Unlock()
				ns.logf("netstack: decrementing connsInFlightByClient[%v] because the packet was not handled; new value is %d", remoteIP, new)
			}
		}()

		// Dynamically reconfigure ns's subnet addresses as needed for
		// outbound traffic.
		if !ns.isLocalIP(localIP) {
			ns.addSubnetAddress(localIP)
		}

		return h(tei, pb)
	}
}
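
// decrementInFlightTCPForward removes tei from the in-flight packet tracking
// map and decrements the per-client in-flight TCP forward count for
// remoteAddr, dropping the map entry entirely once the count reaches zero.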
func (ns *Impl) decrementInFlightTCPForward(tei stack.TransportEndpointID, remoteAddr netip.Addr) {
	ns.mu.Lock()
	defer ns.mu.Unlock()

	// Remove this packet so future SYNs from this address will be handled.
	delete(ns.packetsInFlight, tei)

	was := ns.connsInFlightByClient[remoteAddr]
	newVal := was - 1
	if newVal == 0 {
		delete(ns.connsInFlightByClient, remoteAddr) // free up space in the map
	} else {
		ns.connsInFlightByClient[remoteAddr] = newVal
	}
}

// Start sets up all the handlers so netstack can start working. Implements
// wgengine.FakeImpl.
func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error {
	if lb == nil {
		panic("nil LocalBackend")
	}
	ns.lb = lb
	// size = 0 means use default buffer size
	const tcpReceiveBufferSize = 0
	tcpFwd := tcp.NewForwarder(ns.ipstack, tcpReceiveBufferSize, maxInFlightConnectionAttempts(), ns.acceptTCP)
	udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDP)
	ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket))
	ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber, ns.wrapUDPProtocolHandler(udpFwd.HandlePacket))
	go ns.inject()
	return nil
}
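
// addSubnetAddress registers ip as an address on the netstack NIC, reference
// counting concurrent connections so the address is only added once.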
func (ns *Impl) addSubnetAddress(ip netip.Addr) {
	ns.mu.Lock()
	ns.connsOpenBySubnetIP[ip]++
	needAdd := ns.connsOpenBySubnetIP[ip] == 1
	ns.mu.Unlock()
	// Only register address into netstack for first concurrent connection.
	if needAdd {
		pa := tcpip.ProtocolAddress{
			AddressWithPrefix: tcpip.AddrFromSlice(ip.AsSlice()).WithPrefix(),
		}
		if ip.Is4() {
			pa.Protocol = ipv4.ProtocolNumber
		} else if ip.Is6() {
			pa.Protocol = ipv6.ProtocolNumber
		}
		ns.ipstack.AddProtocolAddress(nicID, pa, stack.AddressProperties{
			PEB:        stack.CanBePrimaryEndpoint, // zero value default
			ConfigType: stack.AddressConfigStatic,  // zero value default
		})
	}
}
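
// removeSubnetAddress decrements the connection count for ip and unregisters
// it from the netstack NIC once the last concurrent connection is gone.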
func (ns *Impl) removeSubnetAddress(ip netip.Addr) {
	ns.mu.Lock()
	defer ns.mu.Unlock()
	ns.connsOpenBySubnetIP[ip]--
	// Only unregister address from netstack after last concurrent connection.
	if ns.connsOpenBySubnetIP[ip] == 0 {
		ns.ipstack.RemoveAddress(nicID, tcpip.AddrFromSlice(ip.AsSlice()))
		delete(ns.connsOpenBySubnetIP, ip)
	}
}
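
// ipPrefixToAddressWithPrefix converts a netip.Prefix into gVisor's
// tcpip.AddressWithPrefix representation.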
func ipPrefixToAddressWithPrefix(ipp netip.Prefix) tcpip.AddressWithPrefix {
	return tcpip.AddressWithPrefix{
		Address:   tcpip.AddrFromSlice(ipp.Addr().AsSlice()),
		PrefixLen: int(ipp.Bits()),
	}
}

var v4broadcast = netaddr.IPv4(255, 255, 255, 255)

// UpdateNetstackIPs updates the set of local IPs that netstack should handle
// from nm.
//
// TODO(bradfitz): don't pass the whole netmap here; just pass the two
// address slice views.
func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
	var selfNode tailcfg.NodeView
	if nm != nil {
		ns.atomicIsLocalIPFunc.Store(tsaddr.NewContainsIPFunc(nm.GetAddresses()))
		selfNode = nm.SelfNode
	} else {
		ns.atomicIsLocalIPFunc.Store(tsaddr.FalseContainsIPFunc())
	}

	oldPfx := make(map[netip.Prefix]bool)
	for _, protocolAddr := range ns.ipstack.AllAddresses()[nicID] {
		ap := protocolAddr.AddressWithPrefix
		ip := netaddrIPFromNetstackIP(ap.Address)
		if ip == v4broadcast && ap.PrefixLen == 32 {
			// Don't add 255.255.255.255/32 to oldIPs so we don't
			// delete it later. We didn't install it, so it's not
			// ours to delete.
			continue
		}
		p := netip.PrefixFrom(ip, ap.PrefixLen)
		oldPfx[p] = true
	}
	newPfx := make(map[netip.Prefix]bool)

	if selfNode.Valid() {
		for i := range selfNode.Addresses().Len() {
			p := selfNode.Addresses().At(i)
			newPfx[p] = true
		}
		if ns.ProcessSubnets {
			for i := range selfNode.AllowedIPs().Len() {
				p := selfNode.AllowedIPs().At(i)
				newPfx[p] = true
			}
		}
	}

	pfxToAdd := make(map[netip.Prefix]bool)
	for p := range newPfx {
		if !oldPfx[p] {
			pfxToAdd[p] = true
		}
	}
	pfxToRemove := make(map[netip.Prefix]bool)
	for p := range oldPfx {
		if !newPfx[p] {
			pfxToRemove[p] = true
		}
	}
	ns.mu.Lock()
	for ip := range ns.connsOpenBySubnetIP {
		// TODO(maisem): this looks like a bug, remove or document. It seems as
		// though we might end up either leaking the address on the netstack
		// NIC, or where we do accounting for connsOpenBySubnetIP from 1 to 0,
		// we might end up removing the address from the netstack NIC that was
		// still being advertised.
		delete(pfxToRemove, netip.PrefixFrom(ip, ip.BitLen()))
	}
	ns.mu.Unlock()

	for p := range pfxToRemove {
		err := ns.ipstack.RemoveAddress(nicID, tcpip.AddrFromSlice(p.Addr().AsSlice()))
		if err != nil {
			ns.logf("netstack: could not deregister IP %s: %v", p, err)
		} else {
			ns.logf("[v2] netstack: deregistered IP %s", p)
		}
	}
	for p := range pfxToAdd {
		if !p.IsValid() {
			ns.logf("netstack: [unexpected] skipping invalid IP (%v/%v)", p.Addr(), p.Bits())
			continue
		}
		tcpAddr := tcpip.ProtocolAddress{
			AddressWithPrefix: ipPrefixToAddressWithPrefix(p),
		}
		if p.Addr().Is6() {
			tcpAddr.Protocol = ipv6.ProtocolNumber
		} else {
			tcpAddr.Protocol = ipv4.ProtocolNumber
		}
		var tcpErr tcpip.Error // not error
		tcpErr = ns.ipstack.AddProtocolAddress(nicID, tcpAddr, stack.AddressProperties{
			PEB:        stack.CanBePrimaryEndpoint, // zero value default
			ConfigType: stack.AddressConfigStatic,  // zero value default
		})
		if tcpErr != nil {
			ns.logf("netstack: could not register IP %s: %v", p, tcpErr)
		} else {
			ns.logf("[v2] netstack: registered IP %s", p)
		}
	}
}

// handleLocalPackets is hooked into the tun datapath for packets leaving
// the host and arriving at tailscaled. This method returns filter.DropSilently
// to intercept a packet for handling, for instance traffic to quad-100.
func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Response {
	if ns.ctx.Err() != nil {
		return filter.DropSilently
	}

	// Determine if we care about this local packet.
	dst := p.Dst.Addr()
	switch {
	case dst == serviceIP || dst == serviceIPv6:
		// We want to intercept some traffic to the "service IP" (e.g.
		// 100.100.100.100 for IPv4). However, of traffic to the
		// service IP, we only care about UDP 53, and TCP on port 53,
		// 80, and 8080.
		switch p.IPProto {
		case ipproto.TCP:
			if port := p.Dst.Port(); port != 53 && port != 80 && port != 8080 {
				return filter.Accept
			}
		case ipproto.UDP:
			if port := p.Dst.Port(); port != 53 {
				return filter.Accept
			}
		}
	case viaRange.Contains(dst):
		// We need to handle 4via6 packets leaving the host if the via
		// route is for this host; otherwise the packet will be dropped
		// because nothing will translate it.
		var shouldHandle bool
		if p.IPVersion == 6 && !ns.isLocalIP(dst) {
			shouldHandle = ns.lb != nil && ns.lb.ShouldHandleViaIP(dst)
		}
		if !shouldHandle {
			// Unhandled means that we let the regular processing
			// occur without doing anything ourselves.
			return filter.Accept
		}

		if debugNetstack() {
			ns.logf("netstack: handling local 4via6 packet: version=%d proto=%v dst=%v src=%v",
				p.IPVersion, p.IPProto, p.Dst, p.Src)
		}

		// If this is a ping message, handle it and don't pass to
		// netstack.
		pingIP, handlePing := ns.shouldHandlePing(p)
		if handlePing {
			ns.logf("netstack: handling local 4via6 ping: dst=%v pingIP=%v", dst, pingIP)

			var pong []byte // the reply to the ping, if our relayed ping works
			if dst.Is4() {
				h := p.ICMP4Header()
				h.ToResponse()
				pong = packet.Generate(&h, p.Payload())
			} else if dst.Is6() {
				h := p.ICMP6Header()
				h.ToResponse()
				pong = packet.Generate(&h, p.Payload())
			}

			go ns.userPing(pingIP, pong, userPingDirectionInbound)
			return filter.DropSilently
		}

		// Fall through to writing inbound so netstack handles the
		// 4via6 via connection.

	default:
		// Not traffic to the service IP or a 4via6 IP, so we don't
		// care about the packet; resume processing.
		return filter.Accept
	}

	var pn tcpip.NetworkProtocolNumber
	switch p.IPVersion {
	case 4:
		pn = header.IPv4ProtocolNumber
	case 6:
		pn = header.IPv6ProtocolNumber
	}
	if debugPackets {
		ns.logf("[v2] service packet in (from %v): % x", p.Src, p.Buffer())
	}

	packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(bytes.Clone(p.Buffer())),
	})
	ns.linkEP.InjectInbound(pn, packetBuf)
	packetBuf.DecRef()
	return filter.DropSilently
}
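
// DialContextTCP dials ipp (a remote IP and port) from within the netstack
// network stack, returning a *gonet.TCPConn.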
func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet.TCPConn, error) {
	remoteAddress := tcpip.FullAddress{
		NIC:  nicID,
		Addr: tcpip.AddrFromSlice(ipp.Addr().AsSlice()),
		Port: ipp.Port(),
	}
	var ipType tcpip.NetworkProtocolNumber
	if ipp.Addr().Is4() {
		ipType = ipv4.ProtocolNumber
	} else {
		ipType = ipv6.ProtocolNumber
	}

	return gonet.DialContextTCP(ctx, ns.ipstack, remoteAddress, ipType)
}
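
// DialContextUDP dials ipp (a remote IP and port) from within the netstack
// network stack, returning a *gonet.UDPConn.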
func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.UDPConn, error) {
	remoteAddress := &tcpip.FullAddress{
		NIC:  nicID,
		Addr: tcpip.AddrFromSlice(ipp.Addr().AsSlice()),
		Port: ipp.Port(),
	}
	var ipType tcpip.NetworkProtocolNumber
	if ipp.Addr().Is4() {
		ipType = ipv4.ProtocolNumber
	} else {
		ipType = ipv6.ProtocolNumber
	}

	return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType)
}
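
// A minimal usage sketch (hypothetical caller, not part of this file): with a
// started *Impl, a caller could open a TCP connection that is carried through
// the gVisor stack rather than the host's network stack, roughly:
//
//	conn, err := ns.DialContextTCP(ctx, netip.MustParseAddrPort("100.64.0.2:80"))
//	if err != nil {
//		// handle dial error
//	}
//	defer conn.Close()
//
// The address and port above are placeholders.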

// The inject goroutine reads in packets that netstack generated, and delivers
// them to the correct path.
func (ns *Impl) inject() {
	for {
		pkt := ns.linkEP.ReadContext(ns.ctx)
		if pkt.IsNil() {
			if ns.ctx.Err() != nil {
				// Return without logging.
				return
			}
			ns.logf("[v2] ReadContext-for-write = ok=false")
			continue
		}

		if debugPackets {
			ns.logf("[v2] packet Write out: % x", stack.PayloadSince(pkt.NetworkHeader()).AsSlice())
		}

		// In the normal case, netstack synthesizes the bytes for
		// traffic which should transit back into WG and go to peers.
		// However, some uses of netstack (presently, magic DNS)
		// send traffic destined for the local device, hence must
		// be injected 'inbound'.
		sendToHost := false

		// Determine if the packet is from a service IP, in which case it
		// needs to go back into the machine's network (inbound) instead of
		// out.
		// TODO(tom): Figure out if it's safe to modify packet.Parsed to fill in
		// the IP src/dest even if it's missing the rest of the pkt.
		// That way we don't have to do this twitchy-af byte-yeeting.
		hdr := pkt.Network()
		switch v := hdr.(type) {
		case header.IPv4:
			srcIP := netip.AddrFrom4(v.SourceAddress().As4())
			if serviceIP == srcIP {
				sendToHost = true
			}
		case header.IPv6:
			srcIP := netip.AddrFrom16(v.SourceAddress().As16())
			if srcIP == serviceIPv6 {
				sendToHost = true
			} else if viaRange.Contains(srcIP) {
				// Only send to the host if this 4via6 route is
				// something this node handles.
				if ns.lb != nil && ns.lb.ShouldHandleViaIP(srcIP) {
					sendToHost = true
					if debugNetstack() {
						ns.logf("netstack: sending 4via6 packet to host: %v", srcIP)
					}
				}
			}
		default:
			// unknown; don't forward to host
		}

		// pkt has a non-zero refcount, so the injection method takes
		// ownership of one count and will decrement on completion.
		if sendToHost {
			if err := ns.tundev.InjectInboundPacketBuffer(pkt); err != nil {
				log.Printf("netstack inject inbound: %v", err)
				return
			}
		} else {
			if err := ns.tundev.InjectOutboundPacketBuffer(pkt); err != nil {
				log.Printf("netstack inject outbound: %v", err)
				return
			}
		}
	}
}

// isLocalIP reports whether ip is a Tailscale IP assigned to this
// node directly (but not a subnet-routed IP).
func (ns *Impl) isLocalIP(ip netip.Addr) bool {
	return ns.atomicIsLocalIPFunc.Load()(ip)
}
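
// peerAPIPortAtomic returns the atomic that holds the peer API port for ip's
// address family (IPv4 or IPv6).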
func (ns *Impl) peerAPIPortAtomic(ip netip.Addr) *atomic.Uint32 {
	if ip.Is4() {
		return &ns.peerapiPort4Atomic
	} else {
		return &ns.peerapiPort6Atomic
	}
}

var viaRange = tsaddr.TailscaleViaRange()

// shouldProcessInbound reports whether an inbound packet (a packet from a
// WireGuard peer) should be handled by netstack.
func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool {
	// Handle incoming peerapi connections in netstack.
	dstIP := p.Dst.Addr()
	isLocal := ns.isLocalIP(dstIP)

	// Handle TCP connection to the Tailscale IP(s) in some cases:
	if ns.lb != nil && p.IPProto == ipproto.TCP && isLocal {
		var peerAPIPort uint16

		if p.TCPFlags&packet.TCPSynAck == packet.TCPSyn {
			if port, ok := ns.lb.GetPeerAPIPort(dstIP); ok {
				peerAPIPort = port
				ns.peerAPIPortAtomic(dstIP).Store(uint32(port))
			}
		} else {
			peerAPIPort = uint16(ns.peerAPIPortAtomic(dstIP).Load())
		}
		dport := p.Dst.Port()
		if dport == peerAPIPort {
			return true
		}
		// Also handle SSH connections, webserver, etc, if enabled:
		if ns.lb.ShouldInterceptTCPPort(dport) {
			return true
		}
	}
	if p.IPVersion == 6 && !isLocal && viaRange.Contains(dstIP) {
		return ns.lb != nil && ns.lb.ShouldHandleViaIP(dstIP)
	}
	if ns.ProcessLocalIPs && isLocal {
		return true
	}
	if ns.ProcessSubnets && !isLocal {
		return true
	}
	return false
}

var userPingSem = syncs.NewSemaphore(20) // 20 child ping processes at once

type userPingDirection int

const (
	// userPingDirectionOutbound is used when the pong packet is to be sent
	// "outbound"–i.e. from this node to a peer via WireGuard.
	userPingDirectionOutbound userPingDirection = iota
	// userPingDirectionInbound is used when the pong packet is to be sent
	// "inbound"–i.e. from Tailscale to another process on this host.
	userPingDirectionInbound
)

// userPing tries to ping dstIP and, if it succeeds, injects pingResPkt
// into the tundev.
//
// It's used in userspace/netstack mode when we don't have kernel
// support or raw socket access. As such, this does the dumbest thing
// that can work: runs the ping command. It's not super efficient, so
// it bounds the number of pings going on at once. The idea is that
// people only use ping occasionally to see if their internet's working
// so this doesn't need to be great.
// On Apple platforms, this function doesn't run the ping command. Instead,
// it sends a non-privileged ping.
//
// The 'direction' parameter is used to determine where the response "pong"
// packet should be written, if the ping succeeds. See the documentation on the
// constants for more details.
//
// TODO(bradfitz): when we're running on Windows as the system user, use
// raw socket APIs instead of ping child processes.
func (ns *Impl) userPing(dstIP netip.Addr, pingResPkt []byte, direction userPingDirection) {
	if !userPingSem.TryAcquire() {
		return
	}
	defer userPingSem.Release()

	t0 := time.Now()
	err := ns.sendOutboundUserPing(dstIP, 3*time.Second)
	d := time.Since(t0)
	if err != nil {
		if d < time.Second/2 {
			// If it failed quicker than the 3 second
			// timeout we gave above (500 ms is a
			// reasonable threshold), then assume the ping
			// failed for problems finding/running
			// ping. We don't want to log if the host is
			// just down.
			ns.logf("exec ping of %v failed in %v: %v", dstIP, d, err)
		}
		return
	}
	if debugNetstack() {
		ns.logf("exec pinged %v in %v", dstIP, time.Since(t0))
	}
	if direction == userPingDirectionOutbound {
		if err := ns.tundev.InjectOutbound(pingResPkt); err != nil {
			ns.logf("InjectOutbound ping response: %v", err)
		}
	} else if direction == userPingDirectionInbound {
		if err := ns.tundev.InjectInboundCopy(pingResPkt); err != nil {
			ns.logf("InjectInboundCopy ping response: %v", err)
		}
	}
}

// injectInbound is installed as a packet hook on the 'inbound' (from a
// WireGuard peer) path. Returning filter.Accept releases the packet to
// continue normally (typically being delivered to the host networking stack),
// whereas returning filter.DropSilently is done when netstack intercepts the
// packet and no further processing toward the host should be done.
func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Response {
	if ns.ctx.Err() != nil {
		return filter.DropSilently
	}

	if !ns.shouldProcessInbound(p, t) {
		// Let the host network stack (if any) deal with it.
		return filter.Accept
	}

	destIP := p.Dst.Addr()

	// If this is an echo request and we're a subnet router, handle pings
	// ourselves instead of forwarding the packet on.
	pingIP, handlePing := ns.shouldHandlePing(p)
	if handlePing {
		var pong []byte // the reply to the ping, if our relayed ping works
		if destIP.Is4() {
			h := p.ICMP4Header()
			h.ToResponse()
			pong = packet.Generate(&h, p.Payload())
		} else if destIP.Is6() {
			h := p.ICMP6Header()
			h.ToResponse()
			pong = packet.Generate(&h, p.Payload())
		}
		go ns.userPing(pingIP, pong, userPingDirectionOutbound)
		return filter.DropSilently
	}

	var pn tcpip.NetworkProtocolNumber
	switch p.IPVersion {
	case 4:
		pn = header.IPv4ProtocolNumber
	case 6:
		pn = header.IPv6ProtocolNumber
	}
	if debugPackets {
		ns.logf("[v2] packet in (from %v): % x", p.Src, p.Buffer())
	}
	packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(bytes.Clone(p.Buffer())),
	})
	ns.linkEP.InjectInbound(pn, packetBuf)
	packetBuf.DecRef()

	// We've now delivered this to netstack, so we're done.
	// Instead of returning a filter.Accept here (which would also
	// potentially deliver it to the host OS), and instead of
	// filter.Drop (which would log about rejected traffic),
	// instead return filter.DropSilently which just quietly stops
	// processing it in the tstun TUN wrapper.
	return filter.DropSilently
}

// shouldHandlePing returns whether or not netstack should handle an incoming
// ICMP echo request packet, and the IP address that should be pinged from this
// process. The IP address can be different from the destination in the packet
// if the destination is a 4via6 address.
func (ns *Impl) shouldHandlePing(p *packet.Parsed) (_ netip.Addr, ok bool) {
	if !p.IsEchoRequest() {
		return netip.Addr{}, false
	}

	destIP := p.Dst.Addr()

	// We need to handle pings for all 4via6 addresses, even if this
	// netstack instance normally isn't responsible for processing subnets.
	//
	// For example, on Linux, subnet router traffic could be handled via
	// tun+iptables rules for most packets, but we still need to handle
	// ICMP echo requests over 4via6 since the host networking stack
	// doesn't know what to do with a 4via6 address.
	//
	// shouldProcessInbound returns 'true' to say that we should process
	// all IPv6 packets with a destination address in the 'via' range, so
	// check before we check the "ProcessSubnets" boolean below.
	if viaRange.Contains(destIP) {
		// The input echo request was to a 4via6 address, which we cannot
		// simply ping as-is from this process. Translate the destination to an
		// IPv4 address, so that our relayed ping (in userPing) is pinging the
		// underlying destination IP.
		//
		// ICMPv4 and ICMPv6 are different protocols with different on-the-wire
		// representations, so normally you can't send an ICMPv6 message over
		// IPv4 and expect to get a useful result. However, in this specific
		// case things are safe because the 'userPing' function doesn't make
		// use of the input packet.
		return tsaddr.UnmapVia(destIP), true
	}

	// If we get here, we don't do anything unless this netstack instance
	// is responsible for processing subnet traffic.
	if !ns.ProcessSubnets {
		return netip.Addr{}, false
	}

	// For non-4via6 addresses, we don't handle pings if they're destined
	// for a Tailscale IP.
	if tsaddr.IsTailscaleIP(destIP) {
		return netip.Addr{}, false
	}

	// This netstack instance is processing subnet traffic, so handle the
	// ping ourselves.
	return destIP, true
}
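
// netaddrIPFromNetstackIP converts a gVisor tcpip.Address (4 or 16 bytes) into
// a netip.Addr, returning the zero Addr for any other length.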
func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr {
	switch s.Len() {
	case 4:
		s := s.As4()
		return netaddr.IPv4(s[0], s[1], s[2], s[3])
	case 16:
		s := s.As16()
		return netip.AddrFrom16(s).Unmap()
	}
	return netip.Addr{}
}
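
// acceptTCP is the callback for netstack's TCP forwarder: it is invoked for
// each new inbound TCP connection attempt that netstack handles.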
func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) {
|
2021-04-21 18:50:48 +00:00
|
|
|
|
reqDetails := r.ID()
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-04-21 18:50:48 +00:00
|
|
|
|
ns.logf("[v2] TCP ForwarderRequest: %s", stringifyTEI(reqDetails))
|
2021-03-03 18:37:01 +00:00
|
|
|
|
}
|
2021-08-25 21:39:09 +00:00
|
|
|
|
clientRemoteIP := netaddrIPFromNetstackIP(reqDetails.RemoteAddress)
|
|
|
|
|
if !clientRemoteIP.IsValid() {
|
|
|
|
|
ns.logf("invalid RemoteAddress in TCP ForwarderRequest: %s", stringifyTEI(reqDetails))
|
2022-01-18 22:03:14 +00:00
|
|
|
|
r.Complete(true) // sends a RST
|
2021-08-25 21:39:09 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
|
|
|
|
|
// After we've returned from this function or have otherwise reached a
|
2024-02-29 04:21:31 +00:00
|
|
|
|
// non-pending state, decrement the per-client in-flight count and
|
|
|
|
|
// remove this endpoint from our packet tracking map so future TCP
|
|
|
|
|
// connections aren't dropped.
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
inFlightCompleted := false
|
2024-02-29 04:21:31 +00:00
|
|
|
|
tei := r.ID()
|
wgengine/netstack: add a per-client limit for in-flight TCP forwards
This is a fun one. Right now, when a client is connecting through a
subnet router, here's roughly what happens:
1. The client initiates a connection to an IP address behind a subnet
router, and sends a TCP SYN
2. The subnet router gets the SYN packet from netstack, and after
running through acceptTCP, starts DialContext-ing the destination IP,
without accepting the connection¹
3. The client retransmits the SYN packet a few times while the dial is
in progress, until either...
4. The subnet router successfully establishes a connection to the
destination IP and sends the SYN-ACK back to the client, or...
5. The subnet router times out and sends a RST to the client.
6. If the connection was successful, the client ACKs the SYN-ACK it
received, and traffic starts flowing
As a result, the notification code in forwardTCP never notices when a
new connection attempt is aborted, and it will wait until either the
connection is established, or until the OS-level connection timeout is
reached and it aborts.
To mitigate this, add a per-client limit on how many in-flight TCP
forwarding connections can be in-progress; after this, clients will see
a similar behaviour to the global limit, where new connection attempts
are aborted instead of waiting. This prevents a single misbehaving
client from blocking all other clients of a subnet router by ensuring
that it doesn't starve the global limiter.
Also, bump the global limit again to a higher value.
¹ We can't accept the connection before establishing a connection to the
remote server since otherwise we'd be opening the connection and then
immediately closing it, which breaks a bunch of stuff; see #5503 for
more details.
Updates tailscale/corp#12184
Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I76e7008ddd497303d75d473f534e32309c8a5144
2024-02-26 20:06:47 +00:00
|
|
|
|
defer func() {
|
|
|
|
|
if !inFlightCompleted {
|
2024-02-29 04:21:31 +00:00
|
|
|
|
ns.decrementInFlightTCPForward(tei, clientRemoteIP)
|
2024-02-26 20:06:47 +00:00
|
|
|
|
}
|
|
|
|
|
}()
|
|
|
|
|
|
2022-11-09 23:38:09 +00:00
|
|
|
|
clientRemotePort := reqDetails.RemotePort
|
|
|
|
|
clientRemoteAddrPort := netip.AddrPortFrom(clientRemoteIP, clientRemotePort)
|
2021-08-25 21:39:09 +00:00
|
|
|
|
|
|
|
|
|
dialIP := netaddrIPFromNetstackIP(reqDetails.LocalAddress)
|
|
|
|
|
isTailscaleIP := tsaddr.IsTailscaleIP(dialIP)
|
2022-03-30 15:47:16 +00:00
|
|
|
|
|
2023-03-07 22:52:06 +00:00
|
|
|
|
dstAddrPort := netip.AddrPortFrom(dialIP, reqDetails.LocalPort)
|
|
|
|
|
|
2022-03-30 15:47:16 +00:00
|
|
|
|
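// 4via6 addresses embed an IPv4 subnet destination; treat them as
// non-Tailscale IPs and dial the unmapped IPv4 address instead.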
if viaRange.Contains(dialIP) {
|
|
|
|
|
isTailscaleIP = false
|
|
|
|
|
dialIP = tsaddr.UnmapVia(dialIP)
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-29 18:33:05 +00:00
|
|
|
|
defer func() {
|
|
|
|
|
if !isTailscaleIP {
|
|
|
|
|
// If this is a subnet IP, we added it before the TCP handshake
|
|
|
|
|
// so that netstack would accept the handshake as a subnet IP.
|
2021-08-25 21:39:09 +00:00
|
|
|
|
ns.removeSubnetAddress(dialIP)
|
2021-03-29 18:33:05 +00:00
|
|
|
|
}
|
|
|
|
|
}()
|
2022-09-06 20:04:10 +00:00
|
|
|
|
|
2021-02-25 19:18:16 +00:00
|
|
|
|
var wq waiter.Queue
|
2021-08-25 21:39:09 +00:00
|
|
|
|
|
2022-09-06 20:04:10 +00:00
|
|
|
|
// We can't actually create the endpoint or complete the inbound
|
|
|
|
|
// request until we're sure that the connection can be handled by this
|
|
|
|
|
// endpoint. This function sets up the TCP connection and should be
|
|
|
|
|
// called immediately before a connection is handled.
|
2023-06-08 23:57:40 +00:00
|
|
|
|
getConnOrReset := func(opts ...tcpip.SettableSocketOption) *gonet.TCPConn {
|
2022-09-06 20:04:10 +00:00
|
|
|
|
ep, err := r.CreateEndpoint(&wq)
|
|
|
|
|
if err != nil {
|
|
|
|
|
ns.logf("CreateEndpoint error for %s: %v", stringifyTEI(reqDetails), err)
|
|
|
|
|
r.Complete(true) // sends a RST
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
r.Complete(false)
|
2022-10-31 06:56:46 +00:00
|
|
|
|
for _, opt := range opts {
|
|
|
|
|
ep.SetSockOpt(opt)
|
|
|
|
|
}
|
2022-09-06 20:04:10 +00:00
|
|
|
|
// SetKeepAlive so that idle connections to peers that have forgotten about
|
|
|
|
|
// the connection or gone completely offline eventually time out.
|
|
|
|
|
// Applications might be setting this on a forwarded connection, but from
|
|
|
|
|
// userspace we cannot see those, so the best we can do is to always
|
|
|
|
|
// perform them with conservative timing.
|
|
|
|
|
// TODO(tailscale/tailscale#4522): Netstack defaults match the Linux
|
|
|
|
|
// defaults, and result in a little over two hours before the socket would
|
|
|
|
|
// be closed due to keepalive. A shorter default might be better, or seeking
|
|
|
|
|
// a default from the host IP stack. This also might be a useful
|
|
|
|
|
// user-tunable, as in userspace mode this can have broad implications such
|
|
|
|
|
// as lingering connections to fork style daemons. On the other side of the
|
|
|
|
|
// fence, the long duration timers are low impact values for battery powered
|
|
|
|
|
// peers.
|
|
|
|
|
ep.SocketOptions().SetKeepAlive(true)
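// For comparison only (not what this code does): when dialing out through the
// host's own IP stack, the standard library exposes the analogous knob on
// net.Dialer. The 75-second period here is an arbitrary illustration, not a
// recommendation from this package.
illustrativeDialer := &net.Dialer{KeepAlive: 75 * time.Second}
_ = illustrativeDialer // e.g. illustrativeDialer.DialContext(ctx, "tcp", addr)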
|
|
|
|
|
|
2024-02-26 20:06:47 +00:00
|
|
|
|
// This function is called when we're ready to use the
|
|
|
|
|
// underlying connection, and thus it's no longer in a
|
|
|
|
|
// "in-flight" state; decrement our per-client limit right now,
|
|
|
|
|
// and tell the defer in acceptTCP that it doesn't need to do
|
|
|
|
|
// so upon return.
|
2024-02-29 04:21:31 +00:00
|
|
|
|
ns.decrementInFlightTCPForward(tei, clientRemoteIP)
|
2024-02-26 20:06:47 +00:00
|
|
|
|
inFlightCompleted = true
|
|
|
|
|
|
2022-09-06 20:04:10 +00:00
|
|
|
|
// The ForwarderRequest.CreateEndpoint above asynchronously
|
|
|
|
|
// starts the TCP handshake. Note that the gonet.TCPConn
|
|
|
|
|
// methods c.RemoteAddr() and c.LocalAddr() will return nil
|
|
|
|
|
// until the handshake actually completes. But we have the
|
|
|
|
|
// remote address in reqDetails instead, so we don't use
|
|
|
|
|
// gonet.TCPConn.RemoteAddr. The byte copies in both
|
|
|
|
|
// directions to/from the gonet.TCPConn in forwardTCP will
|
|
|
|
|
// block until the TCP handshake is complete.
|
|
|
|
|
return gonet.NewTCPConn(&wq, ep)
|
|
|
|
|
}
|
|
|
|
|
|
2024-02-11 21:01:59 +00:00
|
|
|
|
// Local Services (DNS and WebDAV)
|
2024-02-02 18:45:32 +00:00
|
|
|
|
hittingServiceIP := dialIP == serviceIP || dialIP == serviceIPv6
|
|
|
|
|
hittingDNS := hittingServiceIP && reqDetails.LocalPort == 53
|
2024-02-28 17:44:42 +00:00
|
|
|
|
if hittingDNS {
|
2023-06-08 23:57:40 +00:00
|
|
|
|
c := getConnOrReset()
|
2022-09-06 20:04:10 +00:00
|
|
|
|
if c == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
2024-02-02 18:45:32 +00:00
|
|
|
|
addrPort := netip.AddrPortFrom(clientRemoteIP, reqDetails.RemotePort)
|
2024-02-28 17:44:42 +00:00
|
|
|
|
go ns.dns.HandleTCPConn(c, addrPort)
|
2022-05-05 23:42:45 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-31 17:20:22 +00:00
|
|
|
|
if ns.lb != nil {
|
2023-06-08 23:57:40 +00:00
|
|
|
|
handler, opts := ns.lb.TCPHandlerForDst(clientRemoteAddrPort, dstAddrPort)
|
|
|
|
|
if handler != nil {
|
|
|
|
|
c := getConnOrReset(opts...) // will send a RST if it fails
|
2022-09-06 20:04:10 +00:00
|
|
|
|
if c == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
2023-06-08 23:57:40 +00:00
|
|
|
|
handler(c)
|
2022-11-07 23:32:53 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
2022-01-31 17:20:22 +00:00
|
|
|
|
}
|
2022-05-05 21:53:36 +00:00
|
|
|
|
|
2023-03-07 22:52:06 +00:00
|
|
|
|
if ns.GetTCPHandlerForFlow != nil {
|
|
|
|
|
handler, ok := ns.GetTCPHandlerForFlow(clientRemoteAddrPort, dstAddrPort)
|
|
|
|
|
if ok {
|
|
|
|
|
if handler == nil {
|
|
|
|
|
r.Complete(true)
|
|
|
|
|
return
|
|
|
|
|
}
|
2023-06-08 23:57:40 +00:00
|
|
|
|
c := getConnOrReset() // will send a RST if it fails
|
2023-03-07 22:52:06 +00:00
|
|
|
|
if c == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
handler(c)
|
2022-09-06 20:04:10 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-05-14 15:53:55 +00:00
|
|
|
|
}
|
2021-03-29 18:33:05 +00:00
|
|
|
|
if isTailscaleIP {
|
2021-08-25 21:39:09 +00:00
|
|
|
|
dialIP = netaddr.IPv4(127, 0, 0, 1)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
}
|
2022-07-26 04:14:09 +00:00
|
|
|
|
dialAddr := netip.AddrPortFrom(dialIP, uint16(reqDetails.LocalPort))
|
2022-09-06 20:04:10 +00:00
|
|
|
|
|
2023-06-08 23:57:40 +00:00
|
|
|
|
if !ns.forwardTCP(getConnOrReset, clientRemoteIP, &wq, dialAddr) {
|
2022-09-06 20:04:10 +00:00
|
|
|
|
r.Complete(true) // sends a RST
|
|
|
|
|
}
|
2021-02-25 19:18:16 +00:00
|
|
|
|
}
|
2020-09-03 22:45:41 +00:00
|
|
|
|
|
2022-10-31 06:56:46 +00:00
|
|
|
|
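// forwardTCP dials dialAddr on behalf of the peer at clientRemoteIP, and once
// the outbound dial succeeds it completes the client's TCP handshake via
// getClient and copies bytes in both directions until either side closes or
// errors. It reports whether it took responsibility for the client connection.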
func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet.TCPConn, clientRemoteIP netip.Addr, wq *waiter.Queue, dialAddr netip.AddrPort) (handled bool) {
|
2021-08-25 21:39:09 +00:00
|
|
|
|
dialAddrStr := dialAddr.String()
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-12-15 19:30:14 +00:00
|
|
|
|
ns.logf("[v2] netstack: forwarding incoming connection to %s", dialAddrStr)
|
|
|
|
|
}
|
2021-08-25 21:39:09 +00:00
|
|
|
|
|
2021-02-25 19:18:16 +00:00
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
|
defer cancel()
|
2022-09-06 20:04:10 +00:00
|
|
|
|
|
2021-11-09 02:51:03 +00:00
|
|
|
|
waitEntry, notifyCh := waiter.NewChannelEntry(waiter.EventHUp) // TODO(bradfitz): right EventMask?
|
|
|
|
|
wq.EventRegister(&waitEntry)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
defer wq.EventUnregister(&waitEntry)
|
|
|
|
|
done := make(chan bool)
|
|
|
|
|
// netstack doesn't close the notification channel automatically if there was no
|
|
|
|
|
// hup signal, so we close done when this function returns to avoid leaking the goroutine below.
|
|
|
|
|
defer close(done)
|
|
|
|
|
go func() {
|
|
|
|
|
select {
|
|
|
|
|
case <-notifyCh:
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2022-01-07 00:07:50 +00:00
|
|
|
|
ns.logf("[v2] netstack: forwardTCP notifyCh fired; canceling context for %s", dialAddrStr)
|
|
|
|
|
}
|
2021-02-25 19:18:16 +00:00
|
|
|
|
case <-done:
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
2021-02-25 19:18:16 +00:00
|
|
|
|
cancel()
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}()
|
2022-09-06 20:04:10 +00:00
|
|
|
|
|
|
|
|
|
// Attempt to dial the outbound connection before we accept the inbound one.
|
2024-02-26 20:06:47 +00:00
|
|
|
|
var dialFunc func(context.Context, string, string) (net.Conn, error)
|
|
|
|
|
if ns.forwardDialFunc != nil {
|
|
|
|
|
dialFunc = ns.forwardDialFunc
|
|
|
|
|
} else {
|
|
|
|
|
var stdDialer net.Dialer
|
|
|
|
|
dialFunc = stdDialer.DialContext
|
|
|
|
|
}
|
|
|
|
|
server, err := dialFunc(ctx, "tcp", dialAddrStr)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
if err != nil {
|
2022-09-06 20:04:10 +00:00
|
|
|
|
ns.logf("netstack: could not connect to local server at %s: %v", dialAddr.String(), err)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
return
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
2021-02-25 19:18:16 +00:00
|
|
|
|
defer server.Close()
|
2022-09-06 20:04:10 +00:00
|
|
|
|
|
|
|
|
|
// If we get here, either the getClient call below will succeed and
|
|
|
|
|
// return something we can Close, or it will fail and will properly
|
|
|
|
|
// respond to the client with a RST. Either way, the caller no longer
|
|
|
|
|
// needs to clean up the client connection.
|
|
|
|
|
handled = true
|
|
|
|
|
|
|
|
|
|
// We dialed the connection; we can complete the client's TCP handshake.
|
|
|
|
|
client := getClient()
|
|
|
|
|
if client == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
defer client.Close()
|
|
|
|
|
|
2021-03-15 21:59:35 +00:00
|
|
|
|
backendLocalAddr := server.LocalAddr().(*net.TCPAddr)
|
2022-08-03 04:48:56 +00:00
|
|
|
|
backendLocalIPPort := netaddr.Unmap(backendLocalAddr.AddrPort())
|
2023-09-13 18:38:05 +00:00
|
|
|
|
ns.pm.RegisterIPPortIdentity(backendLocalIPPort, clientRemoteIP)
|
|
|
|
|
defer ns.pm.UnregisterIPPortIdentity(backendLocalIPPort)
|
2021-03-02 20:14:29 +00:00
|
|
|
|
connClosed := make(chan error, 2)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
go func() {
|
2021-03-02 20:14:29 +00:00
|
|
|
|
_, err := io.Copy(server, client)
|
|
|
|
|
connClosed <- err
|
2021-02-25 19:18:16 +00:00
|
|
|
|
}()
|
|
|
|
|
go func() {
|
2021-03-02 20:14:29 +00:00
|
|
|
|
_, err := io.Copy(client, server)
|
|
|
|
|
connClosed <- err
|
2021-02-25 19:18:16 +00:00
|
|
|
|
}()
|
2021-03-02 20:14:29 +00:00
|
|
|
|
err = <-connClosed
|
|
|
|
|
if err != nil {
|
|
|
|
|
ns.logf("proxy connection closed with error: %v", err)
|
|
|
|
|
}
|
2021-03-29 18:33:05 +00:00
|
|
|
|
ns.logf("[v2] netstack: forwarder connection to %s closed", dialAddrStr)
|
2022-09-06 20:04:10 +00:00
|
|
|
|
return
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
|
|
|
|
|
2021-02-25 19:18:16 +00:00
|
|
|
|
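// acceptUDP handles a new UDP flow delivered by netstack: MagicDNS queries to
// the service IPs are answered locally, a registered GetUDPHandlerForFlow
// handler (if any) gets the flow next, and everything else is proxied via
// forwardUDP.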
func (ns *Impl) acceptUDP(r *udp.ForwarderRequest) {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
sess := r.ID()
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
ns.logf("[v2] UDP ForwarderRequest: %v", stringifyTEI(sess))
|
2021-04-21 18:50:48 +00:00
|
|
|
|
}
|
2021-02-25 19:18:16 +00:00
|
|
|
|
var wq waiter.Queue
|
|
|
|
|
ep, err := r.CreateEndpoint(&wq)
|
|
|
|
|
if err != nil {
|
2021-04-21 19:10:28 +00:00
|
|
|
|
ns.logf("acceptUDP: could not create endpoint: %v", err)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
return
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
dstAddr, ok := ipPortOfNetstackAddr(sess.LocalAddress, sess.LocalPort)
|
|
|
|
|
if !ok {
|
2022-09-21 04:27:47 +00:00
|
|
|
|
ep.Close()
|
2021-03-08 18:22:35 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
srcAddr, ok := ipPortOfNetstackAddr(sess.RemoteAddress, sess.RemotePort)
|
|
|
|
|
if !ok {
|
2022-09-21 04:27:47 +00:00
|
|
|
|
ep.Close()
|
2021-03-08 18:22:35 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
|
2022-04-14 22:17:26 +00:00
|
|
|
|
// Handle magicDNS traffic (via UDP) here.
|
2024-02-02 18:45:32 +00:00
|
|
|
|
if dst := dstAddr.Addr(); dst == serviceIP || dst == serviceIPv6 {
|
2022-04-14 22:17:26 +00:00
|
|
|
|
if dstAddr.Port() != 53 {
|
2022-09-21 04:27:47 +00:00
|
|
|
|
ep.Close()
|
2022-04-14 22:17:26 +00:00
|
|
|
|
return // Only MagicDNS traffic runs on the service IPs for now.
|
|
|
|
|
}
|
|
|
|
|
|
2024-01-19 23:56:57 +00:00
|
|
|
|
c := gonet.NewUDPConn(&wq, ep)
|
2022-04-14 22:17:26 +00:00
|
|
|
|
go ns.handleMagicDNSUDP(srcAddr, c)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-05 16:50:20 +00:00
|
|
|
|
if get := ns.GetUDPHandlerForFlow; get != nil {
|
|
|
|
|
h, intercept := get(srcAddr, dstAddr)
|
|
|
|
|
if intercept {
|
|
|
|
|
if h == nil {
|
|
|
|
|
ep.Close()
|
|
|
|
|
return
|
|
|
|
|
}
|
2024-01-19 23:56:57 +00:00
|
|
|
|
go h(gonet.NewUDPConn(&wq, ep))
|
2023-03-05 16:50:20 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-01-19 23:56:57 +00:00
|
|
|
|
c := gonet.NewUDPConn(&wq, ep)
|
2023-03-05 16:50:20 +00:00
|
|
|
|
go ns.forwardUDP(c, srcAddr, dstAddr)
|
2021-02-25 19:18:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
2023-09-22 15:49:09 +00:00
|
|
|
|
// Buffer pool for forwarding UDP packets. DNS implementations are advised not to
|
|
|
|
|
// exceed 512 bytes per request due to fragmentation, but in reality they can and do
|
|
|
|
|
// send much larger packets, so use the maximum possible UDP packet size.
|
2023-09-19 12:55:09 +00:00
|
|
|
|
var udpBufPool = &sync.Pool{
|
|
|
|
|
New: func() any {
|
|
|
|
|
b := make([]byte, maxUDPPacketSize)
|
|
|
|
|
return &b
|
|
|
|
|
},
|
|
|
|
|
}
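// Illustrative usage of the pool above (examplePoolRead is a made-up name):
// borrow a buffer, use only the filled prefix, and return the same *[]byte so
// the backing array is reused without per-packet allocations.
func examplePoolRead(read func([]byte) (int, error)) error {
	bufp := udpBufPool.Get().(*[]byte)
	defer udpBufPool.Put(bufp)
	n, err := read(*bufp)
	if err != nil {
		return err
	}
	_ = (*bufp)[:n] // only the filled prefix holds packet data
	return nil
}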
|
|
|
|
|
|
2022-07-26 04:14:09 +00:00
|
|
|
|
func (ns *Impl) handleMagicDNSUDP(srcAddr netip.AddrPort, c *gonet.UDPConn) {
|
2022-05-20 20:30:11 +00:00
|
|
|
|
// Packets are being generated by the local host, so there should be
|
|
|
|
|
// very, very little latency. 150ms was chosen as something of an upper
|
|
|
|
|
// bound on resource usage, while hopefully still being long enough for
|
|
|
|
|
// a heavily loaded system.
|
|
|
|
|
const readDeadline = 150 * time.Millisecond
|
2022-04-14 22:17:26 +00:00
|
|
|
|
|
|
|
|
|
defer c.Close()
|
2023-09-19 12:55:09 +00:00
|
|
|
|
|
|
|
|
|
bufp := udpBufPool.Get().(*[]byte)
|
|
|
|
|
defer udpBufPool.Put(bufp)
|
|
|
|
|
q := *bufp
|
2022-05-20 20:30:11 +00:00
|
|
|
|
|
|
|
|
|
// libresolv from glibc is quite adamant that transmitting multiple DNS
|
|
|
|
|
// requests down the same UDP socket is valid. To support this, we read
|
|
|
|
|
// in a loop (with a tight deadline so we don't chew too many resources).
|
|
|
|
|
//
|
|
|
|
|
// See: https://github.com/bminor/glibc/blob/f7fbb99652eceb1b6b55e4be931649df5946497c/resolv/res_send.c#L995
|
|
|
|
|
for {
|
|
|
|
|
c.SetReadDeadline(time.Now().Add(readDeadline))
|
|
|
|
|
n, _, err := c.ReadFrom(q)
|
|
|
|
|
if err != nil {
|
|
|
|
|
if oe, ok := err.(*net.OpError); !(ok && oe.Timeout()) {
|
|
|
|
|
ns.logf("dns udp read: %v", err) // log non-timeout errors
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
2023-09-07 20:27:50 +00:00
|
|
|
|
resp, err := ns.dns.Query(context.Background(), q[:n], "udp", srcAddr)
|
2022-05-20 20:30:11 +00:00
|
|
|
|
if err != nil {
|
|
|
|
|
ns.logf("dns udp query: %v", err)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
c.Write(resp)
|
2022-04-14 22:17:26 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-21 15:38:13 +00:00
|
|
|
|
// forwardUDP proxies between client (with addr clientAddr) and dstAddr.
|
|
|
|
|
//
|
|
|
|
|
// dstAddr may be either a local Tailscale IP, in which case we proxy to
|
|
|
|
|
// 127.0.0.1, or any other IP (from an advertised subnet), in which case we
|
|
|
|
|
// proxy to it directly.
|
2023-03-05 16:50:20 +00:00
|
|
|
|
func (ns *Impl) forwardUDP(client *gonet.UDPConn, clientAddr, dstAddr netip.AddrPort) {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
port, srcPort := dstAddr.Port(), clientAddr.Port()
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-12-15 19:30:14 +00:00
|
|
|
|
ns.logf("[v2] netstack: forwarding incoming UDP connection on port %v", port)
|
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
|
|
|
|
|
var backendListenAddr *net.UDPAddr
|
|
|
|
|
var backendRemoteAddr *net.UDPAddr
|
2022-07-25 03:08:42 +00:00
|
|
|
|
isLocal := ns.isLocalIP(dstAddr.Addr())
|
2021-07-21 15:38:13 +00:00
|
|
|
|
if isLocal {
|
|
|
|
|
backendRemoteAddr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: int(port)}
|
|
|
|
|
backendListenAddr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: int(srcPort)}
|
|
|
|
|
} else {
|
2022-07-25 03:08:42 +00:00
|
|
|
|
if dstIP := dstAddr.Addr(); viaRange.Contains(dstIP) {
|
2022-07-26 04:14:09 +00:00
|
|
|
|
dstAddr = netip.AddrPortFrom(tsaddr.UnmapVia(dstIP), dstAddr.Port())
|
2022-03-30 15:47:16 +00:00
|
|
|
|
}
|
2022-07-25 03:08:42 +00:00
|
|
|
|
backendRemoteAddr = net.UDPAddrFromAddrPort(dstAddr)
|
|
|
|
|
if dstAddr.Addr().Is4() {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
backendListenAddr = &net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: int(srcPort)}
|
|
|
|
|
} else {
|
|
|
|
|
backendListenAddr = &net.UDPAddr{IP: net.ParseIP("::"), Port: int(srcPort)}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
backendConn, err := net.ListenUDP("udp", backendListenAddr)
|
2021-03-08 18:22:35 +00:00
|
|
|
|
if err != nil {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
ns.logf("netstack: could not bind local port %v: %v, trying again with random port", backendListenAddr.Port, err)
|
2021-03-15 21:59:35 +00:00
|
|
|
|
backendListenAddr.Port = 0
|
2021-07-21 15:38:13 +00:00
|
|
|
|
backendConn, err = net.ListenUDP("udp", backendListenAddr)
|
2020-09-03 22:45:41 +00:00
|
|
|
|
if err != nil {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
ns.logf("netstack: could not create UDP socket, preventing forwarding to %v: %v", dstAddr, err)
|
2021-03-08 18:22:35 +00:00
|
|
|
|
return
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-15 21:59:35 +00:00
|
|
|
|
backendLocalAddr := backendConn.LocalAddr().(*net.UDPAddr)
|
2022-08-03 04:48:56 +00:00
|
|
|
|
|
|
|
|
|
backendLocalIPPort := netip.AddrPortFrom(backendListenAddr.AddrPort().Addr().Unmap().WithZone(backendLocalAddr.Zone), backendLocalAddr.AddrPort().Port())
|
|
|
|
|
if !backendLocalIPPort.IsValid() {
|
2021-03-15 21:59:35 +00:00
|
|
|
|
ns.logf("could not get backend local IP:port from %v:%v", backendLocalAddr.IP, backendLocalAddr.Port)
|
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
if isLocal {
|
2023-09-13 18:38:05 +00:00
|
|
|
|
ns.pm.RegisterIPPortIdentity(backendLocalIPPort, dstAddr.Addr())
|
2021-07-21 15:38:13 +00:00
|
|
|
|
}
|
2021-03-08 18:22:35 +00:00
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
2021-07-21 15:38:13 +00:00
|
|
|
|
|
|
|
|
|
idleTimeout := 2 * time.Minute
|
|
|
|
|
if port == 53 {
|
|
|
|
|
// Make DNS packet copies time out much sooner.
|
|
|
|
|
//
|
|
|
|
|
// TODO(bradfitz): make DNS queries over UDP forwarding even
|
|
|
|
|
// cheaper by adding an additional idleTimeout post-DNS-reply.
|
|
|
|
|
// For instance, after the DNS response goes back out, only
|
|
|
|
|
// wait a few seconds (or zero, really).
|
|
|
|
|
idleTimeout = 30 * time.Second
|
|
|
|
|
}
|
|
|
|
|
timer := time.AfterFunc(idleTimeout, func() {
|
|
|
|
|
if isLocal {
|
2023-09-13 18:38:05 +00:00
|
|
|
|
ns.pm.UnregisterIPPortIdentity(backendLocalIPPort)
|
2021-07-21 15:38:13 +00:00
|
|
|
|
}
|
|
|
|
|
ns.logf("netstack: UDP session between %s and %s timed out", backendListenAddr, backendRemoteAddr)
|
2021-03-08 18:22:35 +00:00
|
|
|
|
cancel()
|
|
|
|
|
client.Close()
|
|
|
|
|
backendConn.Close()
|
|
|
|
|
})
|
|
|
|
|
extend := func() {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
timer.Reset(idleTimeout)
|
2021-03-08 18:22:35 +00:00
|
|
|
|
}
|
2022-07-25 03:08:42 +00:00
|
|
|
|
startPacketCopy(ctx, cancel, client, net.UDPAddrFromAddrPort(clientAddr), backendConn, ns.logf, extend)
|
2021-03-08 18:43:01 +00:00
|
|
|
|
startPacketCopy(ctx, cancel, backendConn, backendRemoteAddr, client, ns.logf, extend)
|
2021-07-21 15:38:13 +00:00
|
|
|
|
if isLocal {
|
|
|
|
|
// Wait for the copies to be done before decrementing the
|
|
|
|
|
// subnet address count to potentially remove the route.
|
|
|
|
|
<-ctx.Done()
|
2022-07-25 03:08:42 +00:00
|
|
|
|
ns.removeSubnetAddress(dstAddr.Addr())
|
2021-07-21 15:38:13 +00:00
|
|
|
|
}
|
2021-03-08 18:22:35 +00:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-08 18:43:01 +00:00
|
|
|
|
func startPacketCopy(ctx context.Context, cancel context.CancelFunc, dst net.PacketConn, dstAddr net.Addr, src net.PacketConn, logf logger.Logf, extend func()) {
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-07-21 15:38:13 +00:00
|
|
|
|
logf("[v2] netstack: startPacketCopy to %v (%T) from %T", dstAddr, dst, src)
|
|
|
|
|
}
|
2021-03-08 18:22:35 +00:00
|
|
|
|
go func() {
|
2021-03-08 18:43:01 +00:00
|
|
|
|
defer cancel() // tear down the other direction's copy
|
2023-09-19 12:55:09 +00:00
|
|
|
|
|
|
|
|
|
bufp := udpBufPool.Get().(*[]byte)
|
|
|
|
|
defer udpBufPool.Put(bufp)
|
|
|
|
|
pkt := *bufp
|
|
|
|
|
|
2021-03-08 18:22:35 +00:00
|
|
|
|
for {
|
|
|
|
|
select {
|
|
|
|
|
case <-ctx.Done():
|
|
|
|
|
return
|
|
|
|
|
default:
|
|
|
|
|
n, srcAddr, err := src.ReadFrom(pkt)
|
|
|
|
|
if err != nil {
|
|
|
|
|
if ctx.Err() == nil {
|
|
|
|
|
logf("read packet from %s failed: %v", srcAddr, err)
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
_, err = dst.WriteTo(pkt[:n], dstAddr)
|
|
|
|
|
if err != nil {
|
|
|
|
|
if ctx.Err() == nil {
|
|
|
|
|
logf("write packet to %s failed: %v", dstAddr, err)
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
2022-09-14 19:49:39 +00:00
|
|
|
|
if debugNetstack() {
|
2021-03-08 18:43:01 +00:00
|
|
|
|
logf("[v2] wrote UDP packet %s -> %s", srcAddr, dstAddr)
|
|
|
|
|
}
|
2021-03-08 18:22:35 +00:00
|
|
|
|
extend()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}()
|
2020-09-03 22:45:41 +00:00
|
|
|
|
}
|
2021-04-21 18:50:48 +00:00
|
|
|
|
|
|
|
|
|
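// stringifyTEI formats a transport endpoint ID as "remote -> local" for logging.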
func stringifyTEI(tei stack.TransportEndpointID) string {
|
|
|
|
|
localHostPort := net.JoinHostPort(tei.LocalAddress.String(), strconv.Itoa(int(tei.LocalPort)))
|
|
|
|
|
remoteHostPort := net.JoinHostPort(tei.RemoteAddress.String(), strconv.Itoa(int(tei.RemotePort)))
|
|
|
|
|
return fmt.Sprintf("%s -> %s", remoteHostPort, localHostPort)
|
|
|
|
|
}
|
2021-07-21 15:38:13 +00:00
|
|
|
|
|
2022-07-26 04:14:09 +00:00
|
|
|
|
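// ipPortOfNetstackAddr converts a netstack address and port to a
// netip.AddrPort, reporting whether the conversion succeeded.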
func ipPortOfNetstackAddr(a tcpip.Address, port uint16) (ipp netip.AddrPort, ok bool) {
|
2023-09-07 09:41:56 +00:00
|
|
|
|
if addr, ok := netip.AddrFromSlice(a.AsSlice()); ok {
|
|
|
|
|
return netip.AddrPortFrom(addr, port), true
|
2022-08-03 04:48:56 +00:00
|
|
|
|
}
|
2023-09-07 09:41:56 +00:00
|
|
|
|
return netip.AddrPort{}, false
|
2021-07-21 15:38:13 +00:00
|
|
|
|
}
|
2024-01-19 23:06:55 +00:00
|
|
|
|
|
|
|
|
|
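// readStatCounter returns the current value of sc clamped to the int64 range,
// since netstack counters are uint64 but the expvar metrics below report int64.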
func readStatCounter(sc *tcpip.StatCounter) int64 {
|
|
|
|
|
vv := sc.Value()
|
|
|
|
|
if vv > math.MaxInt64 {
|
|
|
|
|
return int64(math.MaxInt64)
|
|
|
|
|
}
|
|
|
|
|
return int64(vv)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ExpVar returns an expvar variable suitable for registering with expvar.Publish.
|
|
|
|
|
func (ns *Impl) ExpVar() expvar.Var {
|
|
|
|
|
m := new(metrics.Set)
|
|
|
|
|
|
|
|
|
|
// Global metrics
|
|
|
|
|
stats := ns.ipstack.Stats()
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("counter_dropped_packets", expvar.Func(func() any {
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return readStatCounter(stats.DroppedPackets)
|
|
|
|
|
}))
|
|
|
|
|
|
|
|
|
|
// IP statistics
|
|
|
|
|
ipStats := ns.ipstack.Stats().IP
|
|
|
|
|
ipMetrics := []struct {
|
|
|
|
|
name string
|
|
|
|
|
field *tcpip.StatCounter
|
|
|
|
|
}{
|
|
|
|
|
{"packets_received", ipStats.PacketsReceived},
|
|
|
|
|
{"valid_packets_received", ipStats.ValidPacketsReceived},
|
|
|
|
|
{"disabled_packets_received", ipStats.DisabledPacketsReceived},
|
|
|
|
|
{"invalid_destination_addresses_received", ipStats.InvalidDestinationAddressesReceived},
|
|
|
|
|
{"invalid_source_addresses_received", ipStats.InvalidSourceAddressesReceived},
|
|
|
|
|
{"packets_delivered", ipStats.PacketsDelivered},
|
|
|
|
|
{"packets_sent", ipStats.PacketsSent},
|
|
|
|
|
{"outgoing_packet_errors", ipStats.OutgoingPacketErrors},
|
|
|
|
|
{"malformed_packets_received", ipStats.MalformedPacketsReceived},
|
|
|
|
|
{"malformed_fragments_received", ipStats.MalformedFragmentsReceived},
|
|
|
|
|
{"iptables_prerouting_dropped", ipStats.IPTablesPreroutingDropped},
|
|
|
|
|
{"iptables_input_dropped", ipStats.IPTablesInputDropped},
|
|
|
|
|
{"iptables_forward_dropped", ipStats.IPTablesForwardDropped},
|
|
|
|
|
{"iptables_output_dropped", ipStats.IPTablesOutputDropped},
|
|
|
|
|
{"iptables_postrouting_dropped", ipStats.IPTablesPostroutingDropped},
|
|
|
|
|
{"option_timestamp_received", ipStats.OptionTimestampReceived},
|
|
|
|
|
{"option_record_route_received", ipStats.OptionRecordRouteReceived},
|
|
|
|
|
{"option_router_alert_received", ipStats.OptionRouterAlertReceived},
|
|
|
|
|
{"option_unknown_received", ipStats.OptionUnknownReceived},
|
|
|
|
|
}
|
|
|
|
|
for _, metric := range ipMetrics {
|
|
|
|
|
metric := metric
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("counter_ip_"+metric.name, expvar.Func(func() any {
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return readStatCounter(metric.field)
|
|
|
|
|
}))
|
|
|
|
|
}
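// Aside on the "metric := metric" copies in these loops: before Go 1.22 a
// range variable was reused across iterations, so closures registered inside
// the loop would otherwise all observe the final element. A self-contained
// illustration (exampleCapture is a made-up name):
func exampleCapture() []func() string {
	names := []string{"a", "b", "c"}
	var fns []func() string
	for _, name := range names {
		name := name // without this copy, pre-Go 1.22, every fn returns "c"
		fns = append(fns, func() string { return name })
	}
	return fns
}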
|
|
|
|
|
|
|
|
|
|
// IP forwarding statistics
|
|
|
|
|
fwdStats := ipStats.Forwarding
|
|
|
|
|
fwdMetrics := []struct {
|
|
|
|
|
name string
|
|
|
|
|
field *tcpip.StatCounter
|
|
|
|
|
}{
|
|
|
|
|
{"unrouteable", fwdStats.Unrouteable},
|
|
|
|
|
{"exhausted_ttl", fwdStats.ExhaustedTTL},
|
|
|
|
|
{"initializing_source", fwdStats.InitializingSource},
|
|
|
|
|
{"link_local_source", fwdStats.LinkLocalSource},
|
|
|
|
|
{"link_local_destination", fwdStats.LinkLocalDestination},
|
|
|
|
|
{"packet_too_big", fwdStats.PacketTooBig},
|
|
|
|
|
{"host_unreachable", fwdStats.HostUnreachable},
|
|
|
|
|
{"extension_header_problem", fwdStats.ExtensionHeaderProblem},
|
|
|
|
|
{"unexpected_multicast_input_interface", fwdStats.UnexpectedMulticastInputInterface},
|
|
|
|
|
{"unknown_output_endpoint", fwdStats.UnknownOutputEndpoint},
|
|
|
|
|
{"no_multicast_pending_queue_buffer_space", fwdStats.NoMulticastPendingQueueBufferSpace},
|
|
|
|
|
{"outgoing_device_no_buffer_space", fwdStats.OutgoingDeviceNoBufferSpace},
|
|
|
|
|
{"errors", fwdStats.Errors},
|
|
|
|
|
}
|
|
|
|
|
for _, metric := range fwdMetrics {
|
|
|
|
|
metric := metric
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("counter_ip_forward_"+metric.name, expvar.Func(func() any {
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return readStatCounter(metric.field)
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// TCP metrics
|
|
|
|
|
tcpStats := ns.ipstack.Stats().TCP
|
|
|
|
|
tcpMetrics := []struct {
|
|
|
|
|
name string
|
|
|
|
|
field *tcpip.StatCounter
|
|
|
|
|
}{
|
|
|
|
|
{"active_connection_openings", tcpStats.ActiveConnectionOpenings},
|
|
|
|
|
{"passive_connection_openings", tcpStats.PassiveConnectionOpenings},
|
|
|
|
|
{"established_resets", tcpStats.EstablishedResets},
|
|
|
|
|
{"established_closed", tcpStats.EstablishedClosed},
|
|
|
|
|
{"established_timeout", tcpStats.EstablishedTimedout},
|
|
|
|
|
{"listen_overflow_syn_drop", tcpStats.ListenOverflowSynDrop},
|
|
|
|
|
{"listen_overflow_ack_drop", tcpStats.ListenOverflowAckDrop},
|
|
|
|
|
{"listen_overflow_syn_cookie_sent", tcpStats.ListenOverflowSynCookieSent},
|
|
|
|
|
{"listen_overflow_syn_cookie_rcvd", tcpStats.ListenOverflowSynCookieRcvd},
|
|
|
|
|
{"listen_overflow_invalid_syn_cookie_rcvd", tcpStats.ListenOverflowInvalidSynCookieRcvd},
|
|
|
|
|
{"failed_connection_attempts", tcpStats.FailedConnectionAttempts},
|
|
|
|
|
{"valid_segments_received", tcpStats.ValidSegmentsReceived},
|
|
|
|
|
{"invalid_segments_received", tcpStats.InvalidSegmentsReceived},
|
|
|
|
|
{"segments_sent", tcpStats.SegmentsSent},
|
|
|
|
|
{"segment_send_errors", tcpStats.SegmentSendErrors},
|
|
|
|
|
{"resets_sent", tcpStats.ResetsSent},
|
|
|
|
|
{"resets_received", tcpStats.ResetsReceived},
|
|
|
|
|
{"retransmits", tcpStats.Retransmits},
|
|
|
|
|
{"fast_recovery", tcpStats.FastRecovery},
|
|
|
|
|
{"sack_recovery", tcpStats.SACKRecovery},
|
|
|
|
|
{"tlp_recovery", tcpStats.TLPRecovery},
|
|
|
|
|
{"slow_start_retransmits", tcpStats.SlowStartRetransmits},
|
|
|
|
|
{"fast_retransmit", tcpStats.FastRetransmit},
|
|
|
|
|
{"timeouts", tcpStats.Timeouts},
|
|
|
|
|
{"checksum_errors", tcpStats.ChecksumErrors},
|
|
|
|
|
{"failed_port_reservations", tcpStats.FailedPortReservations},
|
|
|
|
|
{"segments_acked_with_dsack", tcpStats.SegmentsAckedWithDSACK},
|
|
|
|
|
{"spurious_recovery", tcpStats.SpuriousRecovery},
|
|
|
|
|
{"spurious_rto_recovery", tcpStats.SpuriousRTORecovery},
|
2024-02-26 14:05:18 +00:00
|
|
|
|
{"forward_max_in_flight_drop", tcpStats.ForwardMaxInFlightDrop},
|
2024-01-19 23:06:55 +00:00
|
|
|
|
}
|
|
|
|
|
for _, metric := range tcpMetrics {
|
|
|
|
|
metric := metric
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("counter_tcp_"+metric.name, expvar.Func(func() any {
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return readStatCounter(metric.field)
|
|
|
|
|
}))
|
|
|
|
|
}
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("gauge_tcp_current_established", expvar.Func(func() any {
|
|
|
|
|
return readStatCounter(tcpStats.CurrentEstablished)
|
|
|
|
|
}))
|
|
|
|
|
m.Set("gauge_tcp_current_connected", expvar.Func(func() any {
|
|
|
|
|
return readStatCounter(tcpStats.CurrentConnected)
|
|
|
|
|
}))
|
2024-01-19 23:06:55 +00:00
|
|
|
|
|
|
|
|
|
// UDP metrics
|
|
|
|
|
udpStats := ns.ipstack.Stats().UDP
|
|
|
|
|
udpMetrics := []struct {
|
|
|
|
|
name string
|
|
|
|
|
field *tcpip.StatCounter
|
|
|
|
|
}{
|
|
|
|
|
{"packets_received", udpStats.PacketsReceived},
|
|
|
|
|
{"unknown_port_errors", udpStats.UnknownPortErrors},
|
|
|
|
|
{"receive_buffer_errors", udpStats.ReceiveBufferErrors},
|
|
|
|
|
{"malformed_packets_received", udpStats.MalformedPacketsReceived},
|
|
|
|
|
{"packets_sent", udpStats.PacketsSent},
|
|
|
|
|
{"packet_send_errors", udpStats.PacketSendErrors},
|
|
|
|
|
{"checksum_errors", udpStats.ChecksumErrors},
|
|
|
|
|
}
|
|
|
|
|
for _, metric := range udpMetrics {
|
|
|
|
|
metric := metric
|
2024-02-26 14:05:18 +00:00
|
|
|
|
m.Set("counter_udp_"+metric.name, expvar.Func(func() any {
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return readStatCounter(metric.field)
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
2024-02-26 20:06:47 +00:00
|
|
|
|
// Export gauges that show the current TCP forwarding limits.
|
|
|
|
|
m.Set("gauge_tcp_forward_in_flight_limit", expvar.Func(func() any {
|
|
|
|
|
return maxInFlightConnectionAttempts()
|
|
|
|
|
}))
|
|
|
|
|
m.Set("gauge_tcp_forward_in_flight_per_client_limit", expvar.Func(func() any {
|
|
|
|
|
return maxInFlightConnectionAttemptsPerClient()
|
|
|
|
|
}))
|
|
|
|
|
|
|
|
|
|
// This metric tracks the number of TCP forwarding connections that are
|
|
|
|
|
// currently in-flight, i.e. still waiting for the outbound dial to complete.
|
|
|
|
|
m.Set("gauge_tcp_forward_in_flight", expvar.Func(func() any {
|
|
|
|
|
ns.mu.Lock()
|
|
|
|
|
defer ns.mu.Unlock()
|
|
|
|
|
|
|
|
|
|
var sum int64
|
|
|
|
|
for _, n := range ns.connsInFlightByClient {
|
|
|
|
|
sum += int64(n)
|
|
|
|
|
}
|
|
|
|
|
return sum
|
|
|
|
|
}))
|
|
|
|
|
|
|
|
|
|
m.Set("counter_tcp_forward_max_in_flight_per_client_drop", &ns.forwardInFlightPerClientDropped)
|
|
|
|
|
|
|
|
|
|
// This metric tracks how many clients (if any) have currently reached
|
|
|
|
|
// the per-client limit on in-flight TCP forwarding requests.
|
|
|
|
|
m.Set("gauge_tcp_forward_in_flight_per_client_limit_reached", expvar.Func(func() any {
|
|
|
|
|
ns.mu.Lock()
|
|
|
|
|
defer ns.mu.Unlock()
|
|
|
|
|
|
|
|
|
|
limit := maxInFlightConnectionAttemptsPerClient()
|
|
|
|
|
|
|
|
|
|
var count int64
|
|
|
|
|
for _, n := range ns.connsInFlightByClient {
|
|
|
|
|
if n == limit {
|
|
|
|
|
count++
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return count
|
|
|
|
|
}))
|
|
|
|
|
|
2024-01-19 23:06:55 +00:00
|
|
|
|
return m
|
|
|
|
|
}
|
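// Illustrative use of ExpVar (the "netstack" name below is an arbitrary
// choice, not something this package requires):
//
//	expvar.Publish("netstack", ns.ExpVar())
//
// after which the counters and gauges above are served alongside other
// expvar data, typically at /debug/vars on the default HTTP mux.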