// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package tstun provides a TUN struct implementing the tun.Device interface
// with additional features as required by wgengine.
package tstun

import (
	"errors"
	"fmt"
	"io"
	"net/netip"
	"os"
	"reflect"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gaissmai/bart"
	"github.com/tailscale/wireguard-go/device"
	"github.com/tailscale/wireguard-go/tun"
	"go4.org/mem"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"tailscale.com/disco"
	"tailscale.com/net/connstats"
	"tailscale.com/net/packet"
	"tailscale.com/net/packet/checksum"
	"tailscale.com/net/tsaddr"
	"tailscale.com/syncs"
	"tailscale.com/tstime/mono"
	"tailscale.com/types/ipproto"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/views"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/mak"
	"tailscale.com/util/set"
	"tailscale.com/wgengine/capture"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/wgcfg"
)

const maxBufferSize = device.MaxMessageSize

// PacketStartOffset is the minimal amount of leading space that must exist
// before &packet[offset] in a packet passed to Read, Write, or InjectInboundDirect.
// This is necessary to avoid reallocation in wireguard-go internals.
const PacketStartOffset = device.MessageTransportHeaderSize
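
// A minimal sketch of preparing a buffer for InjectInboundDirect (defined
// below), assuming w is a *Wrapper and payload holds a complete IP packet
// (hypothetical caller code, not part of this package):
//
//	buf := make([]byte, PacketStartOffset+len(payload))
//	copy(buf[PacketStartOffset:], payload)
//	err := w.InjectInboundDirect(buf, PacketStartOffset)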

// MaxPacketSize is the maximum size (in bytes)
// of a packet that can be injected into a tstun.Wrapper.
const MaxPacketSize = device.MaxContentSize

const tapDebug = false // for super verbose TAP debugging

var (
	// ErrClosed is returned when attempting an operation on a closed Wrapper.
	ErrClosed = errors.New("device closed")
	// ErrFiltered is returned when the acted-on packet is rejected by a filter.
	ErrFiltered = errors.New("packet dropped by filter")
)

var (
	errPacketTooBig   = errors.New("packet too big")
	errOffsetTooBig   = errors.New("offset larger than buffer length")
	errOffsetTooSmall = errors.New("offset smaller than PacketStartOffset")
)

// parsedPacketPool holds a pool of Parsed structs for use in filtering.
// This is needed because escape analysis cannot see that parsed packets
// do not escape through {Pre,Post}Filter{In,Out}.
var parsedPacketPool = sync.Pool{New: func() any { return new(packet.Parsed) }}
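
// The accessor pattern used throughout this file:
//
//	p := parsedPacketPool.Get().(*packet.Parsed)
//	defer parsedPacketPool.Put(p)
//	p.Decode(buf)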

// FilterFunc is a packet-filtering function with access to the Wrapper device.
// It must not hold onto the packet struct, as its backing storage will be reused.
type FilterFunc func(*packet.Parsed, *Wrapper) filter.Response

// Wrapper augments a tun.Device with packet filtering and injection.
//
// A Wrapper starts in a "corked" mode where Read calls are blocked
// until the Wrapper's Start method is called.
type Wrapper struct {
	logf        logger.Logf
	limitedLogf logger.Logf // aggressively rate-limited logf used for potentially high volume errors
	// tdev is the underlying Wrapper device.
	tdev  tun.Device
	isTAP bool // whether tdev is a TAP device

	started atomic.Bool   // whether Start has been called
	startCh chan struct{} // closed in Start

	closeOnce sync.Once

	// lastActivityAtomic is read/written atomically.
	// On 32 bit systems, if the fields above change,
	// you might need to add an align64 field here.
	lastActivityAtomic mono.Time // time of last send or receive

	destIPActivity syncs.AtomicValue[map[netip.Addr]func()]
	//lint:ignore U1000 used in tap_linux.go
	destMACAtomic syncs.AtomicValue[[6]byte]
	discoKey      syncs.AtomicValue[key.DiscoPublic]

	// timeNow, if non-nil, will be used to obtain the current time.
	timeNow func() time.Time

	// peerConfig stores the current NAT configuration.
	peerConfig atomic.Pointer[peerConfig]

	// vectorBuffer stores the oldest unconsumed packet vector from tdev. It is
	// allocated in wrap() and the underlying arrays should never grow.
	vectorBuffer [][]byte
	// bufferConsumedMu protects bufferConsumed from concurrent sends, closes,
	// and send-after-close (by way of bufferConsumedClosed).
	bufferConsumedMu sync.Mutex
	// bufferConsumedClosed is true when bufferConsumed has been closed. This is
	// read by bufferConsumed writers to prevent send-after-close.
	bufferConsumedClosed bool
	// bufferConsumed synchronizes access to vectorBuffer (shared by Read() and
	// pollVector()).
	//
	// Close closes bufferConsumed and sets bufferConsumedClosed to true.
	bufferConsumed chan struct{}

	// closed signals poll (by closing) when the device is closed.
	closed chan struct{}
	// outboundMu protects outbound and vectorOutbound from concurrent sends,
	// closes, and send-after-close (by way of outboundClosed).
	outboundMu sync.Mutex
	// outboundClosed is true when outbound or vectorOutbound have been closed.
	// This is read by outbound and vectorOutbound writers to prevent
	// send-after-close.
	outboundClosed bool
	// vectorOutbound is the queue by which packets leave the TUN device.
	//
	// The directions are relative to the network, not the device:
	// inbound packets arrive via UDP and are written into the TUN device;
	// outbound packets are read from the TUN device and sent out via UDP.
	// This queue is needed because although inbound writes are synchronous,
	// the other direction must wait on a WireGuard goroutine to poll it.
	//
	// Empty reads are skipped by WireGuard, so it is always legal
	// to discard an empty packet instead of sending it through vectorOutbound.
	//
	// Close closes vectorOutbound and sets outboundClosed to true.
	vectorOutbound chan tunVectorReadResult

	// eventsUpDown yields up and down tun.Events that arrive on a Wrapper's events channel.
	eventsUpDown chan tun.Event
	// eventsOther yields non-up-and-down tun.Events that arrive on a Wrapper's events channel.
	eventsOther chan tun.Event

	// filter atomically stores the currently active packet filter
	filter atomic.Pointer[filter.Filter]
	// filterFlags control the verbosity of logging packet drops/accepts.
	filterFlags filter.RunFlags

	// PreFilterPacketInboundFromWireGuard is the inbound filter function that runs before the main filter
	// and therefore sees the packets that may be later dropped by it.
	PreFilterPacketInboundFromWireGuard FilterFunc
	// PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter.
	PostFilterPacketInboundFromWireGuard FilterFunc
	// PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter
	// for packets from the local system. This filter is populated by netstack to hook
	// packets that should be handled by netstack. If set, this filter runs before
	// PreFilterFromTunToEngine.
	PreFilterPacketOutboundToWireGuardNetstackIntercept FilterFunc
	// PreFilterPacketOutboundToWireGuardEngineIntercept is a filter function that runs before the main filter
	// for packets from the local system. This filter is populated by wgengine to hook
	// packets which it handles internally. If both this and PreFilterFromTunToNetstack
	// filter functions are non-nil, this filter runs second.
	PreFilterPacketOutboundToWireGuardEngineIntercept FilterFunc
	// PostFilterPacketOutboundToWireGuard is the outbound filter function that runs after the main filter.
	PostFilterPacketOutboundToWireGuard FilterFunc

	// OnTSMPPongReceived, if non-nil, is called whenever a TSMP pong arrives.
	OnTSMPPongReceived func(packet.TSMPPongReply)

	// OnICMPEchoResponseReceived, if non-nil, is called whenever an ICMP echo
	// response arrives. It returns true if the packet is to be handled
	// internally, false otherwise.
	OnICMPEchoResponseReceived func(*packet.Parsed) bool

	// PeerAPIPort, if non-nil, returns the peerapi port that's
	// running for the given IP address.
	PeerAPIPort func(netip.Addr) (port uint16, ok bool)

	// disableFilter disables all filtering when set. This should only be used in tests.
	disableFilter bool

	// disableTSMPRejected disables TSMP rejected responses. For tests.
	disableTSMPRejected bool

	// stats maintains per-connection counters.
	stats atomic.Pointer[connstats.Statistics]

	captureHook syncs.AtomicValue[capture.Callback]
}

// tunInjectedRead is an injected packet pretending to be a tun.Read().
type tunInjectedRead struct {
	// Only one of packet or data should be set, and they are read in that
	// order of precedence.
	packet *stack.PacketBuffer
	data   []byte
}

// tunVectorReadResult is the result of a tun.Read(), or an injected packet
// pretending to be a tun.Read().
type tunVectorReadResult struct {
	// When err AND data are nil, injected will be set with meaningful data
	// (injected packet). If either err OR data is non-nil, injected should be
	// ignored (a "real" tun.Read).
	err      error
	data     [][]byte
	injected tunInjectedRead

	dataOffset int
}

type setWrapperer interface {
	// setWrapper enables the underlying TUN/TAP to have access to the Wrapper.
	// It MUST be called only once during initialization; other usage is unsafe.
	setWrapper(*Wrapper)
}

// Start unblocks any Wrapper.Read calls that have already started
// and makes the Wrapper functional.
//
// Start must be called exactly once after the various Tailscale
// subsystems have been wired up to each other.
func (w *Wrapper) Start() {
	w.started.Store(true)
	close(w.startCh)
}
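
// A minimal wiring sketch, assuming dev is a tun.Device and logf a
// logger.Logf supplied by the caller (hypothetical, not part of this
// package):
//
//	w := tstun.Wrap(logf, dev)
//	// ... connect filters, wgengine, netstack, etc. ...
//	w.Start() // uncorks pending Read calls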

func WrapTAP(logf logger.Logf, tdev tun.Device) *Wrapper {
	return wrap(logf, tdev, true)
}

func Wrap(logf logger.Logf, tdev tun.Device) *Wrapper {
	return wrap(logf, tdev, false)
}

func wrap(logf logger.Logf, tdev tun.Device, isTAP bool) *Wrapper {
	logf = logger.WithPrefix(logf, "tstun: ")
	w := &Wrapper{
		logf:        logf,
		limitedLogf: logger.RateLimitedFn(logf, 1*time.Minute, 2, 10),
		isTAP:       isTAP,
		tdev:        tdev,
		// bufferConsumed is conceptually a condition variable:
		// a goroutine should not block when setting it, even with no listeners.
		bufferConsumed: make(chan struct{}, 1),
		closed:         make(chan struct{}),
		// vectorOutbound can be unbuffered; the buffer is an optimization.
		vectorOutbound: make(chan tunVectorReadResult, 1),
		eventsUpDown:   make(chan tun.Event),
		eventsOther:    make(chan tun.Event),
		// TODO(dmytro): (highly rate-limited) hexdumps should happen on unknown packets.
		filterFlags: filter.LogAccepts | filter.LogDrops,
		startCh:     make(chan struct{}),
	}

	w.vectorBuffer = make([][]byte, tdev.BatchSize())
	for i := range w.vectorBuffer {
		w.vectorBuffer[i] = make([]byte, maxBufferSize)
	}
	go w.pollVector()

	go w.pumpEvents()
	// The buffer starts out consumed.
	w.bufferConsumed <- struct{}{}
	w.noteActivity()

	if sw, ok := w.tdev.(setWrapperer); ok {
		sw.setWrapper(w)
	}

	return w
}

// now returns the current time, either by calling t.timeNow if set or
// time.Now if not.
func (t *Wrapper) now() time.Time {
	if t.timeNow != nil {
		return t.timeNow()
	}
	return time.Now()
}

// SetDestIPActivityFuncs sets a map of funcs to run per packet
// destination (the map keys).
//
// The map ownership passes to the Wrapper. It must be non-nil.
func (t *Wrapper) SetDestIPActivityFuncs(m map[netip.Addr]func()) {
	t.destIPActivity.Store(m)
}

// SetDiscoKey sets the current discovery key.
//
// It is only used for filtering out bogus traffic when network
// stack(s) get confused; see Issue 1526.
func (t *Wrapper) SetDiscoKey(k key.DiscoPublic) {
	t.discoKey.Store(k)
}

// isSelfDisco reports whether packet p
// looks like a Disco packet from ourselves.
// See Issue 1526.
func (t *Wrapper) isSelfDisco(p *packet.Parsed) bool {
	if p.IPProto != ipproto.UDP {
		return false
	}
	pkt := p.Payload()
	discobs, ok := disco.Source(pkt)
	if !ok {
		return false
	}
	discoSrc := key.DiscoPublicFromRaw32(mem.B(discobs))
	selfDiscoPub := t.discoKey.Load()
	return selfDiscoPub == discoSrc
}

func (t *Wrapper) Close() error {
	var err error
	t.closeOnce.Do(func() {
		if t.started.CompareAndSwap(false, true) {
			close(t.startCh)
		}
		close(t.closed)
		t.bufferConsumedMu.Lock()
		t.bufferConsumedClosed = true
		close(t.bufferConsumed)
		t.bufferConsumedMu.Unlock()
		t.outboundMu.Lock()
		t.outboundClosed = true
		close(t.vectorOutbound)
		t.outboundMu.Unlock()
		err = t.tdev.Close()
	})
	return err
}

// isClosed reports whether t is closed.
func (t *Wrapper) isClosed() bool {
	select {
	case <-t.closed:
		return true
	default:
		return false
	}
}

// pumpEvents copies events from t.tdev to t.eventsUpDown and t.eventsOther.
// pumpEvents exits when t.tdev.events or t.closed is closed.
// pumpEvents closes t.eventsUpDown and t.eventsOther when it exits.
func (t *Wrapper) pumpEvents() {
	defer close(t.eventsUpDown)
	defer close(t.eventsOther)
	src := t.tdev.Events()
	for {
		// Retrieve an event from the TUN device.
		var event tun.Event
		var ok bool
		select {
		case <-t.closed:
			return
		case event, ok = <-src:
			if !ok {
				return
			}
		}

		// Pass along event to the correct recipient.
		// Though event is a bitmask, in practice there is only ever one bit set at a time.
		dst := t.eventsOther
		if event&(tun.EventUp|tun.EventDown) != 0 {
			dst = t.eventsUpDown
		}
		select {
		case <-t.closed:
			return
		case dst <- event:
		}
	}
}

// EventsUpDown returns a TUN event channel that contains all Up and Down events.
func (t *Wrapper) EventsUpDown() chan tun.Event {
	return t.eventsUpDown
}
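
// A consumer sketch for the up/down stream (hypothetical caller code):
//
//	for ev := range w.EventsUpDown() {
//		if ev&tun.EventUp != 0 {
//			// interface came up
//		}
//	}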

// Events returns a TUN event channel that contains all non-Up, non-Down events.
// It is named Events because it is the set of events that we want to expose to wireguard-go,
// and Events is the name specified by the wireguard-go tun.Device interface.
func (t *Wrapper) Events() <-chan tun.Event {
	return t.eventsOther
}

func (t *Wrapper) File() *os.File {
	return t.tdev.File()
}

func (t *Wrapper) MTU() (int, error) {
	return t.tdev.MTU()
}

func (t *Wrapper) Name() (string, error) {
	return t.tdev.Name()
}

const ethernetFrameSize = 14 // 2 six-byte MACs, 2 bytes ethertype

// pollVector polls t.tdev.Read(), placing the oldest unconsumed packet vector
// into t.vectorBuffer. This is needed because t.tdev.Read() in general may
// block (it does on Windows), so packets may be stuck in t.vectorOutbound if
// t.Read() called t.tdev.Read() directly.
func (t *Wrapper) pollVector() {
	sizes := make([]int, len(t.vectorBuffer))
	readOffset := PacketStartOffset
	if t.isTAP {
		readOffset = PacketStartOffset - ethernetFrameSize
	}

	for range t.bufferConsumed {
	DoRead:
		for i := range t.vectorBuffer {
			t.vectorBuffer[i] = t.vectorBuffer[i][:cap(t.vectorBuffer[i])]
		}
		var n int
		var err error
		for n == 0 && err == nil {
			if t.isClosed() {
				return
			}
			n, err = t.tdev.Read(t.vectorBuffer[:], sizes, readOffset)
			if t.isTAP && tapDebug {
				s := fmt.Sprintf("% x", t.vectorBuffer[0][:])
				for strings.HasSuffix(s, " 00") {
					s = strings.TrimSuffix(s, " 00")
				}
				t.logf("TAP read %v, %v: %s", n, err, s)
			}
		}
		for i := range sizes[:n] {
			t.vectorBuffer[i] = t.vectorBuffer[i][:readOffset+sizes[i]]
		}
		if t.isTAP {
			if err == nil {
				ethernetFrame := t.vectorBuffer[0][readOffset:]
				if t.handleTAPFrame(ethernetFrame) {
					goto DoRead
				}
			}
			// Fall through. We got an IP packet.
			if sizes[0] >= ethernetFrameSize {
				t.vectorBuffer[0] = t.vectorBuffer[0][:readOffset+sizes[0]-ethernetFrameSize]
			}
			if tapDebug {
				t.logf("tap regular frame: %x", t.vectorBuffer[0][PacketStartOffset:PacketStartOffset+sizes[0]])
			}
		}
		t.sendVectorOutbound(tunVectorReadResult{
			data:       t.vectorBuffer[:n],
			dataOffset: PacketStartOffset,
			err:        err,
		})
	}
}

// sendBufferConsumed does t.bufferConsumed <- struct{}{}.
func (t *Wrapper) sendBufferConsumed() {
	t.bufferConsumedMu.Lock()
	defer t.bufferConsumedMu.Unlock()
	if t.bufferConsumedClosed {
		return
	}
	t.bufferConsumed <- struct{}{}
}

// injectOutbound does t.vectorOutbound <- r.
func (t *Wrapper) injectOutbound(r tunInjectedRead) {
	t.outboundMu.Lock()
	defer t.outboundMu.Unlock()
	if t.outboundClosed {
		return
	}
	t.vectorOutbound <- tunVectorReadResult{
		injected: r,
	}
}

// sendVectorOutbound does t.vectorOutbound <- r.
func (t *Wrapper) sendVectorOutbound(r tunVectorReadResult) {
	t.outboundMu.Lock()
	defer t.outboundMu.Unlock()
	if t.outboundClosed {
		return
	}
	t.vectorOutbound <- r
}

// snat does SNAT on p if the destination address requires a different source address.
func (t *Wrapper) snat(p *packet.Parsed) {
	pc := t.peerConfig.Load()
	oldSrc := p.Src.Addr()
	newSrc := pc.selectSrcIP(oldSrc, p.Dst.Addr())
	if oldSrc != newSrc {
		checksum.UpdateSrcAddr(p, newSrc)
	}
}

// dnat does destination NAT on p.
func (t *Wrapper) dnat(p *packet.Parsed) {
	pc := t.peerConfig.Load()
	oldDst := p.Dst.Addr()
	newDst := pc.mapDstIP(oldDst)
	if newDst != oldDst {
		checksum.UpdateDstAddr(p, newDst)
	}
}
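
// Together, snat and dnat make masquerading transparent to peers: if a peer
// asks this node to masquerade as, say, 100.99.1.1 (a hypothetical address),
// snat rewrites the source of outbound packets from the native address to
// 100.99.1.1, and dnat rewrites the destination of inbound packets addressed
// to 100.99.1.1 back to the native address.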

// findV4 returns the first Tailscale IPv4 address in addrs.
func findV4(addrs []netip.Prefix) netip.Addr {
	for _, ap := range addrs {
		a := ap.Addr()
		if a.Is4() && tsaddr.IsTailscaleIP(a) {
			return a
		}
	}
	return netip.Addr{}
}

// findV6 returns the first Tailscale IPv6 address in addrs.
func findV6(addrs []netip.Prefix) netip.Addr {
	for _, ap := range addrs {
		a := ap.Addr()
		if a.Is6() && tsaddr.IsTailscaleIP(a) {
			return a
		}
	}
	return netip.Addr{}
}

// peerConfig is the configuration for different peers.
// It should be treated as immutable.
//
// The nil value is a valid configuration.
type peerConfig struct {
	// nativeAddr4 and nativeAddr6 are the IPv4/IPv6 Tailscale Addresses of
	// the current node.
	//
	// These are implicitly used as the address to rewrite to in the DNAT
	// path (as configured by listenAddrs, below). The IPv4 address will be
	// used if the inbound packet is IPv4, and the IPv6 address if the
	// inbound packet is IPv6.
	nativeAddr4, nativeAddr6 netip.Addr

	// listenAddrs is the set of addresses that should be
	// mapped to the native address. These are the addresses that
	// peers will use to connect to this node.
	listenAddrs views.Map[netip.Addr, struct{}] // masqAddr -> struct{}

	// dstMasqAddrs is the routing table used to map a given dst IP to the
	// respective MasqueradeAsIP address. The MasqueradeAsIP address is the
	// address that should be used as the source address for packets to dst.
	dstMasqAddrs *bart.Table[netip.Addr]

	// masqAddrCounts is a count of peers by MasqueradeAsIP.
	masqAddrCounts map[netip.Addr]int
}
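
// A sketch of how dstMasqAddrs is populated and queried (hypothetical
// addresses; see peerConfigFromWGConfig and selectSrcIP for the real code):
//
//	var rt bart.Table[netip.Addr]
//	rt.Insert(netip.MustParsePrefix("100.64.7.0/24"), netip.MustParseAddr("100.99.1.1"))
//	masq, ok := rt.Get(netip.MustParseAddr("100.64.7.9")) // 100.99.1.1, true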

func (c *peerConfig) String() string {
	if c == nil {
		return "peerConfig(nil)"
	}
	var b strings.Builder
	b.WriteString("peerConfig{")
	fmt.Fprintf(&b, "nativeAddr4: %v, ", c.nativeAddr4)
	fmt.Fprintf(&b, "nativeAddr6: %v, ", c.nativeAddr6)
	fmt.Fprint(&b, "listenAddrs: [")

	i := 0
	c.listenAddrs.Range(func(k netip.Addr, _ struct{}) bool {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(k.String())
		i++
		return true
	})

	i = 0
	b.WriteString("], dstMasqAddrs: [")
	for k, v := range c.masqAddrCounts {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "%v: %v peers", k, v)
		i++
	}
	b.WriteString("]}")

	return b.String()
}

// mapDstIP returns the destination IP to use for a packet to dst.
// If dst is not one of the listen addresses, it is returned as-is,
// otherwise the native address is returned.
func (c *peerConfig) mapDstIP(oldDst netip.Addr) netip.Addr {
	if c == nil {
		return oldDst
	}
	if _, ok := c.listenAddrs.GetOk(oldDst); ok {
		if oldDst.Is4() && c.nativeAddr4.IsValid() {
			return c.nativeAddr4
		}
		if oldDst.Is6() && c.nativeAddr6.IsValid() {
			return c.nativeAddr6
		}
	}
	return oldDst
}

// selectSrcIP returns the source IP to use for a packet to dst.
// If the packet is not from the native address, it is returned as-is.
func (c *peerConfig) selectSrcIP(oldSrc, dst netip.Addr) netip.Addr {
	if c == nil {
		return oldSrc
	}
	if oldSrc.Is4() && oldSrc != c.nativeAddr4 {
		return oldSrc
	}
	if oldSrc.Is6() && oldSrc != c.nativeAddr6 {
		return oldSrc
	}
	eip, ok := c.dstMasqAddrs.Get(dst)
	if !ok {
		return oldSrc
	}
	return eip
}

// peerConfigFromWGConfig generates a peerConfig from wcfg. If NAT is not required,
// and no additional configuration is present, it returns nil.
func peerConfigFromWGConfig(wcfg *wgcfg.Config) *peerConfig {
	if wcfg == nil {
		return nil
	}

	nativeAddr4 := findV4(wcfg.Addresses)
	nativeAddr6 := findV6(wcfg.Addresses)
	if !nativeAddr4.IsValid() && !nativeAddr6.IsValid() {
		return nil
	}

	var (
		rt             bart.Table[netip.Addr]
		masqAddrCounts = map[netip.Addr]int{}
		listenAddrs    set.Set[netip.Addr]
	)

	// When using an exit node that requires masquerading, we need to
	// fill out the routing table with all peers, not just the ones that
	// require masquerading.
	exitNodeRequiresMasq := false // true if using an exit node and it requires masquerading
	for _, p := range wcfg.Peers {
		isExitNode := slices.Contains(p.AllowedIPs, tsaddr.AllIPv4()) || slices.Contains(p.AllowedIPs, tsaddr.AllIPv6())
		if isExitNode {
			hasMasqAddr := false ||
				(p.V4MasqAddr != nil && p.V4MasqAddr.IsValid()) ||
				(p.V6MasqAddr != nil && p.V6MasqAddr.IsValid())
			if hasMasqAddr {
				exitNodeRequiresMasq = true
			}
			break
		}
	}
	for i := range wcfg.Peers {
		p := &wcfg.Peers[i]

		// Build a routing table that configures DNAT (i.e. changing
		// the V4MasqAddr/V6MasqAddr for a given peer to the current
		// peer's v4/v6 IP).
		var addrToUse4, addrToUse6 netip.Addr
		if p.V4MasqAddr != nil && p.V4MasqAddr.IsValid() {
			addrToUse4 = *p.V4MasqAddr
			mak.Set(&listenAddrs, addrToUse4, struct{}{})
			masqAddrCounts[addrToUse4]++
		}
		if p.V6MasqAddr != nil && p.V6MasqAddr.IsValid() {
			addrToUse6 = *p.V6MasqAddr
			mak.Set(&listenAddrs, addrToUse6, struct{}{})
			masqAddrCounts[addrToUse6]++
		}

		// If the exit node requires masquerading, set the masquerade
		// addresses to our native addresses.
		if exitNodeRequiresMasq {
			if !addrToUse4.IsValid() && nativeAddr4.IsValid() {
				addrToUse4 = nativeAddr4
			}
			if !addrToUse6.IsValid() && nativeAddr6.IsValid() {
				addrToUse6 = nativeAddr6
			}
		}

		if !addrToUse4.IsValid() && !addrToUse6.IsValid() {
			// NAT not required for this peer.
			continue
		}

		// Build the SNAT table that maps each AllowedIP to the
		// masquerade address.
		for _, ip := range p.AllowedIPs {
			is4 := ip.Addr().Is4()
			if is4 && addrToUse4.IsValid() {
				rt.Insert(ip, addrToUse4)
			}
			if !is4 && addrToUse6.IsValid() {
				rt.Insert(ip, addrToUse6)
			}
		}
	}
	if len(listenAddrs) == 0 && len(masqAddrCounts) == 0 {
		return nil
	}
	return &peerConfig{
		nativeAddr4:    nativeAddr4,
		nativeAddr6:    nativeAddr6,
		listenAddrs:    views.MapOf(listenAddrs),
		dstMasqAddrs:   &rt,
		masqAddrCounts: masqAddrCounts,
	}
}

// SetWGConfig is called when a new WireGuard configuration is received. It
// updates the NAT configuration derived from the config's peers.
func (t *Wrapper) SetWGConfig(wcfg *wgcfg.Config) {
	cfg := peerConfigFromWGConfig(wcfg)

	old := t.peerConfig.Swap(cfg)
	if !reflect.DeepEqual(old, cfg) {
		t.logf("peer config: %v", cfg)
	}
}

var (
	magicDNSIPPort   = netip.AddrPortFrom(tsaddr.TailscaleServiceIP(), 0) // 100.100.100.100:0
	magicDNSIPPortv6 = netip.AddrPortFrom(tsaddr.TailscaleServiceIPv6(), 0)
)
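
// filterPacketOutboundToWireGuard runs the outbound (OS-to-WireGuard) filter
// path on p: it fakes ICMP echo replies to MagicDNS, drops self-disco
// packets, and runs the pre-, main, and post-filters in order.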
func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed) filter.Response {
	// Fake ICMP echo responses to MagicDNS (100.100.100.100).
	if p.IsEchoRequest() {
		switch p.Dst {
		case magicDNSIPPort:
			header := p.ICMP4Header()
			header.ToResponse()
			outp := packet.Generate(&header, p.Payload())
			t.InjectInboundCopy(outp)
			return filter.DropSilently // don't pass on to OS; already handled
		case magicDNSIPPortv6:
			header := p.ICMP6Header()
			header.ToResponse()
			outp := packet.Generate(&header, p.Payload())
			t.InjectInboundCopy(outp)
			return filter.DropSilently // don't pass on to OS; already handled
		}
	}

	// Issue 1526 workaround: if we sent disco packets over
	// Tailscale from ourselves, then drop them, as that shouldn't
	// happen unless a networking stack is confused, as it seems
	// macOS in Network Extension mode might be.
	if p.IPProto == ipproto.UDP && // disco is over UDP; avoid isSelfDisco call for TCP/etc
		t.isSelfDisco(p) {
		t.limitedLogf("[unexpected] received self disco out packet over tstun; dropping")
		metricPacketOutDropSelfDisco.Add(1)
		return filter.DropSilently
	}

	if t.PreFilterPacketOutboundToWireGuardNetstackIntercept != nil {
		if res := t.PreFilterPacketOutboundToWireGuardNetstackIntercept(p, t); res.IsDrop() {
			// Handled by netstack.Impl.handleLocalPackets (quad-100 DNS primarily)
			return res
		}
	}
	if t.PreFilterPacketOutboundToWireGuardEngineIntercept != nil {
		if res := t.PreFilterPacketOutboundToWireGuardEngineIntercept(p, t); res.IsDrop() {
			// Handled by userspaceEngine.handleLocalPackets (primarily handles
			// quad-100 if netstack is not installed).
			return res
		}
	}

	filt := t.filter.Load()
	if filt == nil {
		return filter.Drop
	}

	if filt.RunOut(p, t.filterFlags) != filter.Accept {
		metricPacketOutDropFilter.Add(1)
		return filter.Drop
	}

	if t.PostFilterPacketOutboundToWireGuard != nil {
		if res := t.PostFilterPacketOutboundToWireGuard(p, t); res.IsDrop() {
			return res
		}
	}

	return filter.Accept
}

// noteActivity records that there was a read or write at the current time.
func (t *Wrapper) noteActivity() {
	t.lastActivityAtomic.StoreAtomic(mono.Now())
}

// IdleDuration reports how long it's been since the last read or write to this device.
//
// Its value should only be presumed accurate to roughly 10ms granularity.
// If there's never been activity, the duration is since the wrapper was created.
func (t *Wrapper) IdleDuration() time.Duration {
	return mono.Since(t.lastActivityAtomic.LoadAtomic())
}
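
// Read implements tun.Device.Read for the outbound (OS-to-WireGuard) path.
// It blocks until Start has been called, then consumes one
// tunVectorReadResult: injected packets are handled by injectedRead and
// bypass filters; real reads have SNAT and the outbound filters applied to
// each packet before the survivors are copied into buffs at offset.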
func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
	if !t.started.Load() {
		<-t.startCh
	}
	// packet from OS read and sent to WG
	res, ok := <-t.vectorOutbound
	if !ok {
		return 0, io.EOF
	}
	if res.err != nil && len(res.data) == 0 {
		return 0, res.err
	}
	if res.data == nil {
		n, err := t.injectedRead(res.injected, buffs[0], offset)
		sizes[0] = n
		if err != nil && n == 0 {
			return 0, err
		}

		return 1, err
	}

	metricPacketOut.Add(int64(len(res.data)))

	var buffsPos int
	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	captHook := t.captureHook.Load()
	for _, data := range res.data {
		p.Decode(data[res.dataOffset:])

		t.snat(p)
		if m := t.destIPActivity.Load(); m != nil {
			if fn := m[p.Dst.Addr()]; fn != nil {
				fn()
			}
		}
		if captHook != nil {
			captHook(capture.FromLocal, t.now(), p.Buffer(), p.CaptureMeta)
		}
		if !t.disableFilter {
			response := t.filterPacketOutboundToWireGuard(p)
			if response != filter.Accept {
				metricPacketOutDrop.Add(1)
				continue
			}
		}
		n := copy(buffs[buffsPos][offset:], p.Buffer())
		if n != len(data)-res.dataOffset {
			panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset))
		}
		sizes[buffsPos] = n
		if stats := t.stats.Load(); stats != nil {
			stats.UpdateTxVirtual(p.Buffer())
		}
		buffsPos++
	}

	// t.vectorBuffer has a fixed location in memory.
	// TODO(raggi): add an explicit field and possibly method to the tunVectorReadResult
	// to signal when sendBufferConsumed should be called.
	if &res.data[0] == &t.vectorBuffer[0] {
		// We are done with t.buffer. Let poll() re-use it.
		t.sendBufferConsumed()
	}

	t.noteActivity()
	return buffsPos, res.err
}

// injectedRead handles injected reads, which bypass filters.
func (t *Wrapper) injectedRead(res tunInjectedRead, buf []byte, offset int) (int, error) {
	metricPacketOut.Add(1)

	var n int
	if !res.packet.IsNil() {
		n = copy(buf[offset:], res.packet.NetworkHeader().Slice())
		n += copy(buf[offset+n:], res.packet.TransportHeader().Slice())
		n += copy(buf[offset+n:], res.packet.Data().AsRange().ToSlice())
		res.packet.DecRef()
	} else {
		n = copy(buf[offset:], res.data)
	}

	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	p.Decode(buf[offset : offset+n])
	t.snat(p)

	if m := t.destIPActivity.Load(); m != nil {
		if fn := m[p.Dst.Addr()]; fn != nil {
			fn()
		}
	}

	if stats := t.stats.Load(); stats != nil {
		stats.UpdateTxVirtual(buf[offset:][:n])
	}
	t.noteActivity()
	return n, nil
}
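
// filterPacketInboundFromWireGuard runs the inbound (WireGuard-to-OS) filter
// path on p: it answers TSMP pings, dispatches TSMP pongs and ICMP echo
// responses to their handlers, drops self-disco packets, and runs the pre-,
// main, and post-filters, letting TCP SYNs to the peerapi port through the
// main filter.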
func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback) filter.Response {
	if captHook != nil {
		captHook(capture.FromPeer, t.now(), p.Buffer(), p.CaptureMeta)
	}

	if p.IPProto == ipproto.TSMP {
		if pingReq, ok := p.AsTSMPPing(); ok {
			t.noteActivity()
			t.injectOutboundPong(p, pingReq)
			return filter.DropSilently
		} else if data, ok := p.AsTSMPPong(); ok {
			if f := t.OnTSMPPongReceived; f != nil {
				f(data)
			}
		}
	}

	if p.IsEchoResponse() {
		if f := t.OnICMPEchoResponseReceived; f != nil && f(p) {
			// Note: this looks dropped in metrics, even though it was
			// handled internally.
			return filter.DropSilently
		}
	}

	// Issue 1526 workaround: if we see disco packets over
	// Tailscale from ourselves, then drop them, as that shouldn't
	// happen unless a networking stack is confused, as it seems
	// macOS in Network Extension mode might be.
	if p.IPProto == ipproto.UDP && // disco is over UDP; avoid isSelfDisco call for TCP/etc
		t.isSelfDisco(p) {
		t.limitedLogf("[unexpected] received self disco in packet over tstun; dropping")
		metricPacketInDropSelfDisco.Add(1)
		return filter.DropSilently
	}

	if t.PreFilterPacketInboundFromWireGuard != nil {
		if res := t.PreFilterPacketInboundFromWireGuard(p, t); res.IsDrop() {
			return res
		}
	}

	filt := t.filter.Load()
	if filt == nil {
		return filter.Drop
	}

	outcome := filt.RunIn(p, t.filterFlags)

	// Let peerapi through the filter; its ACLs are handled at L7,
	// not at the packet level.
	if outcome != filter.Accept &&
		p.IPProto == ipproto.TCP &&
		p.TCPFlags&packet.TCPSyn != 0 &&
		t.PeerAPIPort != nil {
		if port, ok := t.PeerAPIPort(p.Dst.Addr()); ok && port == p.Dst.Port() {
			outcome = filter.Accept
		}
	}

	if outcome != filter.Accept {
		metricPacketInDropFilter.Add(1)

		// Tell them, via TSMP, we're dropping them due to the ACL.
		// Their host networking stack can translate this into ICMP
		// or whatnot as required. But notably, their GUI or tailscale CLI
		// can show them a rejection history with reasons.
		if p.IPVersion == 4 && p.IPProto == ipproto.TCP && p.TCPFlags&packet.TCPSyn != 0 && !t.disableTSMPRejected {
			rj := packet.TailscaleRejectedHeader{
				IPSrc:  p.Dst.Addr(),
				IPDst:  p.Src.Addr(),
				Src:    p.Src,
				Dst:    p.Dst,
				Proto:  p.IPProto,
				Reason: packet.RejectedDueToACLs,
			}
			if filt.ShieldsUp() {
				rj.Reason = packet.RejectedDueToShieldsUp
			}
			pkt := packet.Generate(rj, nil)
			t.InjectOutbound(pkt)

			// TODO(bradfitz): also send a TCP RST, after the TSMP message.
		}

		return filter.Drop
	}

	if t.PostFilterPacketInboundFromWireGuard != nil {
		if res := t.PostFilterPacketInboundFromWireGuard(p, t); res.IsDrop() {
			return res
		}
	}

	return filter.Accept
}

// Write accepts incoming packets. The packets begin at buffs[:][offset:],
// like wireguard-go/tun.Device.Write.
func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) {
	metricPacketIn.Add(int64(len(buffs)))
	i := 0
	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	captHook := t.captureHook.Load()
	for _, buff := range buffs {
		p.Decode(buff[offset:])
		t.dnat(p)
		if !t.disableFilter {
			if t.filterPacketInboundFromWireGuard(p, captHook) != filter.Accept {
				metricPacketInDrop.Add(1)
			} else {
				buffs[i] = buff
				i++
			}
		}
	}
	if t.disableFilter {
		i = len(buffs)
	}
	buffs = buffs[:i]

	if len(buffs) > 0 {
		t.noteActivity()
		_, err := t.tdevWrite(buffs, offset)
		return len(buffs), err
	}
	return 0, nil
}
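
// A minimal sketch of the calling convention (hypothetical caller; in
// practice wireguard-go invokes Write after decryption). Each buffer must
// carry PacketStartOffset bytes of headroom before the IP packet; ipPacket
// is an assumed full IP packet:
//
//	buffs := make([][]byte, 1)
//	buffs[0] = make([]byte, PacketStartOffset+len(ipPacket))
//	copy(buffs[0][PacketStartOffset:], ipPacket)
//	n, err := w.Write(buffs, PacketStartOffset) // n = packets accepted past the filter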

func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) {
	if stats := t.stats.Load(); stats != nil {
		for i := range buffs {
			stats.UpdateRxVirtual(buffs[i][offset:])
		}
	}
	return t.tdev.Write(buffs, offset)
}

// GetFilter returns the currently installed packet filter, if any.
func (t *Wrapper) GetFilter() *filter.Filter {
	return t.filter.Load()
}

// SetFilter atomically installs filt as the packet filter.
func (t *Wrapper) SetFilter(filt *filter.Filter) {
	t.filter.Store(filt)
}
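
// A minimal sketch of reading the filter atomically, e.g. to check whether
// shields-up is active (assumes w is a configured *Wrapper):
//
//	if f := w.GetFilter(); f != nil && f.ShieldsUp() {
//		// Inbound connections will be rejected with RejectedDueToShieldsUp.
//	}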

// InjectInboundPacketBuffer makes the Wrapper device behave as if a packet
// with the given contents was received from the network.
// It takes ownership of one reference count on the packet. The injected
// packet will not pass through inbound filters.
//
// This path is typically used to deliver synthesized packets to the
// host networking stack.
func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer) error {
	buf := make([]byte, PacketStartOffset+pkt.Size())

	n := copy(buf[PacketStartOffset:], pkt.NetworkHeader().Slice())
	n += copy(buf[PacketStartOffset+n:], pkt.TransportHeader().Slice())
	n += copy(buf[PacketStartOffset+n:], pkt.Data().AsRange().ToSlice())
	if n != pkt.Size() {
		panic("unexpected packet size after copy")
	}
	pkt.DecRef()

	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	p.Decode(buf[PacketStartOffset:])
	captHook := t.captureHook.Load()
	if captHook != nil {
		captHook(capture.SynthesizedToLocal, t.now(), p.Buffer(), p.CaptureMeta)
	}
	t.dnat(p)

	return t.InjectInboundDirect(buf, PacketStartOffset)
}

// InjectInboundDirect makes the Wrapper device behave as if a packet
// with the given contents was received from the network.
// It blocks and does not take ownership of the packet.
// The injected packet will not pass through inbound filters.
//
// The packet contents are to start at &buf[offset].
// offset must be greater than or equal to PacketStartOffset.
// The space before &buf[offset] will be used by WireGuard.
func (t *Wrapper) InjectInboundDirect(buf []byte, offset int) error {
	if len(buf) > MaxPacketSize {
		return errPacketTooBig
	}
	if len(buf) < offset {
		return errOffsetTooBig
	}
	if offset < PacketStartOffset {
		return errOffsetTooSmall
	}

	// Write to the underlying device to skip filters.
	_, err := t.tdevWrite([][]byte{buf}, offset) // TODO(jwhited): alloc?
	return err
}
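
// A minimal usage sketch (assumes w is a configured *Wrapper and ipPacket
// is a full IP packet with no leading space):
//
//	buf := make([]byte, PacketStartOffset+len(ipPacket))
//	copy(buf[PacketStartOffset:], ipPacket)
//	if err := w.InjectInboundDirect(buf, PacketStartOffset); err != nil {
//		// handle errPacketTooBig etc.
//	}
//
// InjectInboundCopy below performs exactly this reallocation for callers
// that don't already have the required headroom.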

// InjectInboundCopy takes a packet without leading space,
// reallocates it to conform to the InjectInboundDirect interface
// and calls InjectInboundDirect on it. Injecting an empty packet is a no-op.
func (t *Wrapper) InjectInboundCopy(packet []byte) error {
	// We duplicate this check from InjectInboundDirect here
	// to avoid wasting an allocation on an oversized packet.
	if len(packet) > MaxPacketSize {
		return errPacketTooBig
	}
	if len(packet) == 0 {
		return nil
	}

	buf := make([]byte, PacketStartOffset+len(packet))
	copy(buf[PacketStartOffset:], packet)

	return t.InjectInboundDirect(buf, PacketStartOffset)
}

// injectOutboundPong synthesizes a TSMP pong reply to the ping request req
// and injects it outbound, deriving the response IP header from the parsed
// request pp.
func (t *Wrapper) injectOutboundPong(pp *packet.Parsed, req packet.TSMPPingRequest) {
	pong := packet.TSMPPongReply{
		Data: req.Data,
	}
	if t.PeerAPIPort != nil {
		pong.PeerAPIPort, _ = t.PeerAPIPort(pp.Dst.Addr())
	}
	switch pp.IPVersion {
	case 4:
		h4 := pp.IP4Header()
		h4.ToResponse()
		pong.IPHeader = h4
	case 6:
		h6 := pp.IP6Header()
		h6.ToResponse()
		pong.IPHeader = h6
	default:
		return
	}

	t.InjectOutbound(packet.Generate(pong, nil))
}

// InjectOutbound makes the Wrapper device behave as if a packet
// with the given contents was sent to the network.
// It does not block, but takes ownership of the packet.
// The injected packet will not pass through outbound filters.
// Injecting an empty packet is a no-op.
func (t *Wrapper) InjectOutbound(pkt []byte) error {
	if len(pkt) > MaxPacketSize {
		return errPacketTooBig
	}
	if len(pkt) == 0 {
		return nil
	}
	t.injectOutbound(tunInjectedRead{data: pkt})
	return nil
}
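
// A minimal usage sketch (assumes w is a configured *Wrapper and ipPacket
// is a full IP packet). Because InjectOutbound takes ownership, the caller
// must not reuse or modify ipPacket afterwards:
//
//	if err := w.InjectOutbound(ipPacket); err != nil {
//		// handle errPacketTooBig
//	}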

// InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one
// reference count on the packet, and the packet may be mutated. The packet refcount will be
// decremented after the injected buffer has been read.
func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error {
	size := pkt.Size()
	if size > MaxPacketSize {
		pkt.DecRef()
		return errPacketTooBig
	}
	if size == 0 {
		pkt.DecRef()
		return nil
	}
	if capt := t.captureHook.Load(); capt != nil {
		b := pkt.ToBuffer()
		capt(capture.SynthesizedToPeer, t.now(), b.Flatten(), packet.CaptureMeta{})
	}

	t.injectOutbound(tunInjectedRead{packet: pkt})
	return nil
}
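
// A minimal sketch of handing a gVisor packet to this path. It assumes
// stack.NewPacketBuffer and buffer.MakeWithData (gvisor.dev/gvisor/pkg/buffer)
// as the construction API; gVisor versions differ, so check the version in
// go.mod. ipPacket is an assumed full IP packet:
//
//	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
//		Payload: buffer.MakeWithData(ipPacket),
//	})
//	err := w.InjectOutboundPacketBuffer(pkt)
//	// Do not DecRef pkt here: the Wrapper now owns this reference.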

// BatchSize returns the batch size of the underlying tun.Device.
func (t *Wrapper) BatchSize() int {
	return t.tdev.BatchSize()
}

// Unwrap returns the underlying tun.Device.
func (t *Wrapper) Unwrap() tun.Device {
	return t.tdev
}

// SetStatistics specifies a per-connection statistics aggregator.
// Nil may be specified to disable statistics gathering.
func (t *Wrapper) SetStatistics(stats *connstats.Statistics) {
	t.stats.Store(stats)
}
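
// A minimal sketch of enabling and later disabling statistics. It assumes
// connstats.NewStatistics(maxPeriod, maxConns, dump) as the constructor and
// that a nil dump callback is permissible; check the connstats package for
// the exact signature:
//
//	stats := connstats.NewStatistics(5*time.Second, 4096, nil)
//	w.SetStatistics(stats)
//	// ...
//	w.SetStatistics(nil) // stop gathering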

var (
	metricPacketIn              = clientmetric.NewCounter("tstun_in_from_wg")
	metricPacketInDrop          = clientmetric.NewCounter("tstun_in_from_wg_drop")
	metricPacketInDropFilter    = clientmetric.NewCounter("tstun_in_from_wg_drop_filter")
	metricPacketInDropSelfDisco = clientmetric.NewCounter("tstun_in_from_wg_drop_self_disco")

	metricPacketOut              = clientmetric.NewCounter("tstun_out_to_wg")
	metricPacketOutDrop          = clientmetric.NewCounter("tstun_out_to_wg_drop")
	metricPacketOutDropFilter    = clientmetric.NewCounter("tstun_out_to_wg_drop_filter")
	metricPacketOutDropSelfDisco = clientmetric.NewCounter("tstun_out_to_wg_drop_self_disco")
)

// InstallCaptureHook sets cb as the callback invoked to observe packets
// flowing through the Wrapper; a nil cb removes the hook.
func (t *Wrapper) InstallCaptureHook(cb capture.Callback) {
	t.captureHook.Store(cb)
}
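
// A minimal sketch of a capture hook, with the signature inferred from the
// call sites above (path, timestamp, packet bytes, metadata); see the
// capture package for the authoritative capture.Callback definition:
//
//	w.InstallCaptureHook(func(path capture.Path, when time.Time, data []byte, meta packet.CaptureMeta) {
//		log.Printf("capture: path=%v len=%d at %v", path, len(data), when)
//	})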