// Copyright (c) 2019 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package magicsock implements a socket that can change its communication path while
// in use, actively searching for the best way to communicate.
package magicsock

import (
	"bufio"
	"context"
	crand "crypto/rand"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"hash/fnv"
	"math"
	"math/rand"
	"net"
	"os"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/nacl/box"
	"golang.org/x/time/rate"
	"golang.zx2c4.com/wireguard/conn"
	"inet.af/netaddr"
	"tailscale.com/control/controlclient"
	"tailscale.com/derp"
	"tailscale.com/derp/derphttp"
	"tailscale.com/disco"
	"tailscale.com/health"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/logtail/backoff"
	"tailscale.com/net/dnscache"
	"tailscale.com/net/interfaces"
	"tailscale.com/net/netcheck"
	"tailscale.com/net/netns"
	"tailscale.com/net/portmapper"
	"tailscale.com/net/stun"
	"tailscale.com/syncs"
	"tailscale.com/tailcfg"
	"tailscale.com/tstime"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/netmap"
	"tailscale.com/types/nettype"
	"tailscale.com/types/wgkey"
	"tailscale.com/util/uniq"
	"tailscale.com/version"
	"tailscale.com/wgengine/monitor"
	"tailscale.com/wgengine/wgcfg"
)

// Various debugging and experimental tweakables, set by environment
// variable.
var (
	// logPacketDests prints the known addresses for a peer every time
	// they change, in the legacy (non-discovery) endpoint code only.
	logPacketDests, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_LOG_PACKET_DESTS"))
	// debugDisco prints verbose logs of active discovery events as
	// they happen.
	debugDisco, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_DISCO"))
	// debugOmitLocalAddresses removes all local interface addresses
	// from magicsock's discovered local endpoints. Used in some tests.
	debugOmitLocalAddresses, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_OMIT_LOCAL_ADDRS"))
	// debugUseDerpRoute temporarily (2020-03-22) controls whether DERP
	// reverse routing is enabled (Issue 150). It will become always true
	// later.
	debugUseDerpRouteEnv = os.Getenv("TS_DEBUG_ENABLE_DERP_ROUTE")
	debugUseDerpRoute, _ = strconv.ParseBool(debugUseDerpRouteEnv)
	// logDerpVerbose logs all received DERP packets, including their
	// full payload.
	logDerpVerbose, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_DERP"))
	// debugReSTUNStopOnIdle unconditionally enables the "shut down
	// STUN if magicsock is idle" behavior that normally only triggers
	// on mobile devices, lowers the shutdown interval, and logs more
	// verbosely about idle measurements.
	debugReSTUNStopOnIdle, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_RESTUN_STOP_ON_IDLE"))
	// debugAlwaysDERP disables the use of UDP, forcing all peer communication over DERP.
	debugAlwaysDERP, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_ALWAYS_USE_DERP"))
)

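// These tweakables are package-level vars, so they are read once at process
// start. For local debugging, one might enable verbose discovery logging when
// launching the daemon (an illustrative invocation; the binary name and any
// extra flags are assumptions, not taken from this file):
//
//	TS_DEBUG_DISCO=1 tailscaled
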
// useDerpRoute reports whether magicsock should enable the DERP
// return path optimization (Issue 150).
func useDerpRoute() bool {
	if debugUseDerpRouteEnv != "" {
		return debugUseDerpRoute
	}
	ob := controlclient.DERPRouteFlag()
	if v, ok := ob.Get(); ok {
		return v
	}
	return false
}

// inTest reports whether the running program is a test that set the
// IN_TS_TEST environment variable.
//
// Unlike the other debug tweakables above, this one needs to be
// checked every time at runtime, because tests set this after program
// startup.
func inTest() bool {
	inTest, _ := strconv.ParseBool(os.Getenv("IN_TS_TEST"))
	return inTest
}

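// A test binary that wants this behavior typically sets the variable in its
// TestMain before exercising any Conn (a minimal sketch, not code from this
// repository):
//
//	func TestMain(m *testing.M) {
//		os.Setenv("IN_TS_TEST", "1")
//		os.Exit(m.Run())
//	}
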
// A Conn routes UDP packets and actively manages a list of its endpoints.
// It implements wireguard/conn.Bind.
type Conn struct {
	// This block mirrors the contents and field order of the Options
	// struct. Initialized once at construction, then constant.

	logf             logger.Logf
	epFunc           func([]tailcfg.Endpoint)
	derpActiveFunc   func()
	idleFunc         func() time.Duration // nil means unknown
	packetListener   nettype.PacketListener
	noteRecvActivity func(tailcfg.DiscoKey) // or nil, see Options.NoteRecvActivity
	simulatedNetwork bool
	disableLegacy    bool

	// ================================================================
	// No locking required to access these fields, either because
	// they're static after construction, or are wholly owned by a
	// single goroutine.

	connCtx       context.Context // closed on Conn.Close
	connCtxCancel func()          // closes connCtx
	donec         <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call

	// pconn4 and pconn6 are the underlying UDP sockets used to
	// send/receive packets for wireguard and other magicsock
	// protocols.
	pconn4 *RebindingUDPConn
	pconn6 *RebindingUDPConn

	// netChecker is the prober that discovers local network
	// conditions, including the closest DERP relay and NAT mappings.
	netChecker *netcheck.Client

	// portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting
	// port mappings from NAT devices.
	portMapper *portmapper.Client

	// sendLogLimit is a rate limiter for errors logged in the (hot)
	// packet sending codepath. It's so that, if magicsock gets into a
	// bad state, we don't spam one error per wireguard packet being
	// transmitted.
	// TODO(danderson): now that we have global rate-limiting, is this still useful?
	sendLogLimit *rate.Limiter

	// stunReceiveFunc holds the current STUN packet processing func.
	// Its Loaded value is always non-nil.
	stunReceiveFunc atomic.Value // of func(p []byte, fromAddr netaddr.IPPort)

	// derpRecvCh is used by receiveDERP to read DERP messages.
	derpRecvCh chan derpReadResult

	// bind is the wireguard-go conn.Bind for Conn.
	bind *connBind

	// ippEndpoint4 and ippEndpoint6 are owned by receiveIPv4 and
	// receiveIPv6, respectively, to cache an IPPort->endpoint for
	// hot flows.
	ippEndpoint4, ippEndpoint6 ippEndpointCache

	// ============================================================
	// Fields that must be accessed via atomic load/stores.

	// noV4 and noV6 are whether IPv4 and IPv6 are known to be
	// missing. They're only used to suppress log spam. They're
	// named negatively because in early start-up, we don't yet
	// necessarily have a netcheck.Report and don't want to skip
	// logging.
	noV4, noV6 syncs.AtomicBool

	// networkUp is whether the network is up (some interface is up
	// with IPv4 or IPv6). It's used to suppress log spam and prevent
	// new connections that'll fail.
	networkUp syncs.AtomicBool

	// havePrivateKey is whether privateKey is non-zero.
	havePrivateKey syncs.AtomicBool

	// port is the preferred port from opts.Port; 0 means auto.
	port syncs.AtomicUint32

	// ============================================================
	// mu guards all following fields; see userspaceEngine lock ordering rules
	mu     sync.Mutex
	muCond *sync.Cond

	started bool // Start was called
	closed  bool // Close was called

	// derpCleanupTimer is the timer that fires to occasionally clean
	// up idle DERP connections. It's only used when there is a non-home
	// DERP connection in use.
	derpCleanupTimer *time.Timer

	// derpCleanupTimerArmed is whether derpCleanupTimer is
	// scheduled to fire within derpCleanStaleInterval.
	derpCleanupTimerArmed bool

	// periodicReSTUNTimer, when non-nil, is an AfterFunc timer
	// that will call Conn.doPeriodicSTUN.
	periodicReSTUNTimer *time.Timer

	// endpointsUpdateActive indicates that updateEndpoints is
	// currently running. It's used to deduplicate concurrent endpoint
	// update requests.
	endpointsUpdateActive bool
	// wantEndpointsUpdate, if non-empty, means that a new endpoints
	// update should begin immediately after the currently-running one
	// completes. It can only be non-empty if
	// endpointsUpdateActive==true.
	wantEndpointsUpdate string // true if non-empty; string is reason
	// lastEndpoints records the endpoints found during the previous
	// endpoint discovery. It's used to avoid duplicate endpoint
	// change notifications.
	lastEndpoints []tailcfg.Endpoint

	// lastEndpointsTime is the last time the endpoints were updated,
	// even if there was no change.
	lastEndpointsTime time.Time

	// onEndpointRefreshed are funcs to run (in their own goroutines)
	// when endpoints are refreshed.
	onEndpointRefreshed map[*discoEndpoint]func()

	// peerSet is the set of peers that are currently configured in
	// WireGuard. These are not used to filter inbound or outbound
	// traffic at all, but only to track what state can be cleaned up
	// in other maps below that are keyed by peer public key.
	peerSet map[key.Public]struct{}

	// discoPrivate is the private naclbox key used for active
	// discovery traffic. It's created once near (but not during)
	// construction.
	discoPrivate key.Private
	discoPublic  tailcfg.DiscoKey // public of discoPrivate
	discoShort   string           // ShortString of discoPublic (to save logging work later)
	// nodeOfDisco tracks the networkmap Node entity for each peer
	// discovery key.
	//
	// TODO(danderson): the only thing we ever use from this is the
	// peer's WireGuard public key. This could be a map of DiscoKey to
	// NodeKey.
	nodeOfDisco map[tailcfg.DiscoKey]*tailcfg.Node
	discoOfNode map[tailcfg.NodeKey]tailcfg.DiscoKey
	discoOfAddr map[netaddr.IPPort]tailcfg.DiscoKey // validated non-DERP paths only
	// endpointOfDisco tracks the wireguard-go endpoints for peers
	// with recent activity.
	endpointOfDisco map[tailcfg.DiscoKey]*discoEndpoint // those with activity only
	sharedDiscoKey  map[tailcfg.DiscoKey]*[32]byte      // nacl/box precomputed key

	// addrsByUDP is a map of every remote ip:port to a priority
	// list of endpoint addresses for a peer.
	// The priority list is provided by wgengine configuration.
	//
	// Given a wgcfg describing:
	//	machineA: 10.0.0.1:1, 10.0.0.2:2
	//	machineB: 10.0.0.3:3
	// the addrsByUDP map contains:
	//	10.0.0.1:1 -> [10.0.0.1:1, 10.0.0.2:2]
	//	10.0.0.2:2 -> [10.0.0.1:1, 10.0.0.2:2]
	//	10.0.0.3:3 -> [10.0.0.3:3]
	//
	// Used only to communicate with legacy, pre-active-discovery
	// clients.
	addrsByUDP map[netaddr.IPPort]*addrSet
	// addrsByKey maps from public keys (as seen by incoming DERP
	// packets) to its addrSet (the same values as in addrsByUDP).
	//
	// Used only to communicate with legacy, pre-active-discovery
	// clients.
	addrsByKey map[key.Public]*addrSet

	// netInfoFunc is a callback that provides a tailcfg.NetInfo when
	// discovered network conditions change.
	//
	// TODO(danderson): why can't it be set at construction time?
	// There seem to be a few natural places in ipn/local.go to
	// swallow untimely invocations.
	netInfoFunc func(*tailcfg.NetInfo) // nil until set
	// netInfoLast is the NetInfo provided in the last call to
	// netInfoFunc. It's used to deduplicate calls to netInfoFunc.
	//
	// TODO(danderson): should all the deduping happen in
	// ipn/local.go? We seem to be doing dedupe at several layers, and
	// magicsock could do with any complexity reduction it can get.
	netInfoLast *tailcfg.NetInfo

	derpMap     *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled
	netMap      *netmap.NetworkMap
	privateKey  key.Private        // WireGuard private key for this node
	everHadKey  bool               // whether we ever had a non-zero private key
	myDerp      int                // nearest DERP region ID; 0 means none/unknown
	derpStarted chan struct{}      // closed on first connection to DERP; for tests & cleaner Close
	activeDerp  map[int]activeDerp // DERP regionID -> connection to a node in that region
	prevDerp    map[int]*syncs.WaitGroupChan

	// derpRoute contains optional alternate routes to use as an
	// optimization instead of contacting a peer via their home
	// DERP connection. If they sent us a message on a different
	// DERP connection (which should really only be on our DERP
	// home connection, or what was once our home), then we
	// remember that route here to optimistically use instead of
	// creating a new DERP connection back to their home.
	derpRoute map[key.Public]derpRoute

	// peerLastDerp tracks which DERP node we last used to speak with a
	// peer. It's only used to quiet logging, so we only log on change.
	peerLastDerp map[key.Public]int
}

// derpRoute is a route entry for a public key, saying that a certain
// peer should be available at DERP node derpID, as long as the
// current connection for that derpID is dc. (But dc should not be
// used to write directly; it's owned by the read/write loops.)
type derpRoute struct {
	derpID int
	dc     *derphttp.Client // don't use directly; see comment above
}

// removeDerpPeerRoute removes a DERP route entry previously added by addDerpPeerRoute.
func (c *Conn) removeDerpPeerRoute(peer key.Public, derpID int, dc *derphttp.Client) {
	c.mu.Lock()
	defer c.mu.Unlock()
	r2 := derpRoute{derpID, dc}
	if r, ok := c.derpRoute[peer]; ok && r == r2 {
		delete(c.derpRoute, peer)
	}
}

// addDerpPeerRoute adds a DERP route entry, noting that peer was seen
// on DERP node derpID, at least on the connection identified by dc.
// See issue 150 for details.
func (c *Conn) addDerpPeerRoute(peer key.Public, derpID int, dc *derphttp.Client) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.derpRoute == nil {
		c.derpRoute = make(map[key.Public]derpRoute)
	}
	r := derpRoute{derpID, dc}
	c.derpRoute[peer] = r
}

// DerpMagicIP is a fake WireGuard endpoint IP address that means
// to use DERP. When used, the port number of the WireGuard endpoint
// is the DERP server number to use.
//
// Mnemonic: 3.3.40 are numbers above the keys D, E, R, P.
const DerpMagicIP = "127.3.3.40"

var derpMagicIPAddr = netaddr.MustParseIP(DerpMagicIP)

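// For instance, an endpoint of "127.3.3.40:17" does not name a real UDP
// destination; it means "reach this peer via DERP region 17". A minimal
// sketch of decoding such an endpoint (illustrative only; the helper name is
// not part of this file):
//
//	func derpRegionOfEndpoint(ep netaddr.IPPort) (regionID int, ok bool) {
//		if ep.IP() != derpMagicIPAddr {
//			return 0, false // an ordinary UDP endpoint
//		}
//		return int(ep.Port()), true // the port encodes the DERP region ID
//	}
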
// activeDerp contains fields for an active DERP connection.
type activeDerp struct {
	c       *derphttp.Client
	cancel  context.CancelFunc
	writeCh chan<- derpWriteRequest
	// lastWrite is the time of the last request for its write
	// channel (currently even if there was no write).
	// It is always non-nil and initialized to a non-zero Time.
	lastWrite  *time.Time
	createTime time.Time
}

// Options contains options for Listen.
type Options struct {
	// Logf provides the log function to use.
	// Must not be nil.
	Logf logger.Logf

	// Port is the port to listen on.
	// Zero means to pick one automatically.
	Port uint16

	// EndpointsFunc optionally provides a func to be called when
	// endpoints change. The called func does not own the slice.
	EndpointsFunc func([]tailcfg.Endpoint)

	// DERPActiveFunc optionally provides a func to be called when
	// a connection is made to a DERP server.
	DERPActiveFunc func()

	// IdleFunc optionally provides a func to return how long
	// it's been since a TUN packet was sent or received.
	IdleFunc func() time.Duration

	// PacketListener optionally specifies how to create PacketConns.
	// It's meant for testing.
	PacketListener nettype.PacketListener

	// NoteRecvActivity, if provided, is a func for magicsock to
	// call whenever it receives a packet from a
	// discovery-capable peer if it's been more than ~10 seconds
	// since the last one. (10 seconds is somewhat arbitrary; the
	// sole user just doesn't need or want it called on every
	// packet, just every minute or two for WireGuard timeouts,
	// and 10 seconds seems like a good trade-off between often
	// enough and not too often.) The provided func is called
	// while holding userspaceEngine.wgLock and likely calls
	// Conn.ParseEndpoint, which acquires Conn.mu. As such, you
	// should not hold Conn.mu while calling it.
	NoteRecvActivity func(tailcfg.DiscoKey)

	// SimulatedNetwork can be set true in tests to signal that
	// the network is simulated and thus it's okay to bind on the
	// unspecified address (which we'd normally avoid, to avoid
	// triggering macOS and Windows firewall dialog boxes during
	// "go test").
	SimulatedNetwork bool

	// DisableLegacyNetworking disables legacy peer handling. When
	// enabled, only active discovery-aware nodes will be able to
	// communicate with Conn.
	DisableLegacyNetworking bool

	// LinkMonitor is the link monitor to use.
	// With one, the portmapper won't be used.
	LinkMonitor *monitor.Mon
}

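// A minimal construction sketch (illustrative only; logger.Discard and the
// callback body below are assumptions, not code taken from this file):
//
//	conn, err := NewConn(Options{
//		Logf: logger.Discard,
//		Port: 0, // pick a port automatically
//		EndpointsFunc: func(eps []tailcfg.Endpoint) {
//			// react to endpoint changes
//		},
//	})
//	if err != nil {
//		// handle the error
//	}
//	conn.Start()
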
func (o *Options) logf() logger.Logf {
	if o.Logf == nil {
		panic("must provide magicsock.Options.logf")
	}
	return o.Logf
}

func (o *Options) endpointsFunc() func([]tailcfg.Endpoint) {
	if o == nil || o.EndpointsFunc == nil {
		return func([]tailcfg.Endpoint) {}
	}
	return o.EndpointsFunc
}

func (o *Options) derpActiveFunc() func() {
	if o == nil || o.DERPActiveFunc == nil {
		return func() {}
	}
	return o.DERPActiveFunc
}

// newConn is the error-free, network-listening-side-effect-free version
// of NewConn. Mostly for tests.
func newConn() *Conn {
	c := &Conn{
		disableLegacy:   true,
		sendLogLimit:    rate.NewLimiter(rate.Every(1*time.Minute), 1),
		addrsByUDP:      make(map[netaddr.IPPort]*addrSet),
		addrsByKey:      make(map[key.Public]*addrSet),
		derpRecvCh:      make(chan derpReadResult),
		derpStarted:     make(chan struct{}),
		peerLastDerp:    make(map[key.Public]int),
		endpointOfDisco: make(map[tailcfg.DiscoKey]*discoEndpoint),
		sharedDiscoKey:  make(map[tailcfg.DiscoKey]*[32]byte),
		discoOfAddr:     make(map[netaddr.IPPort]tailcfg.DiscoKey),
	}
	c.bind = &connBind{Conn: c, closed: true}
	c.muCond = sync.NewCond(&c.mu)
	c.networkUp.Set(true) // assume up until told otherwise
	return c
}

// NewConn creates a magic Conn listening on opts.Port.
// As the set of possible endpoints for a Conn changes, the
// callback opts.EndpointsFunc is called.
//
// It doesn't start doing anything until Start is called.
func NewConn(opts Options) (*Conn, error) {
	c := newConn()
	c.port.Set(uint32(opts.Port))
	c.logf = opts.logf()
	c.epFunc = opts.endpointsFunc()
	c.derpActiveFunc = opts.derpActiveFunc()
	c.idleFunc = opts.IdleFunc
	c.packetListener = opts.PacketListener
	c.noteRecvActivity = opts.NoteRecvActivity
	c.simulatedNetwork = opts.SimulatedNetwork
	c.disableLegacy = opts.DisableLegacyNetworking
	c.portMapper = portmapper.NewClient(logger.WithPrefix(c.logf, "portmapper: "), c.onPortMapChanged)
	if opts.LinkMonitor != nil {
		c.portMapper.SetGatewayLookupFunc(opts.LinkMonitor.GatewayAndSelfIP)
	}

	if err := c.initialBind(); err != nil {
		return nil, err
	}

	c.connCtx, c.connCtxCancel = context.WithCancel(context.Background())
	c.donec = c.connCtx.Done()
	c.netChecker = &netcheck.Client{
		Logf:                logger.WithPrefix(c.logf, "netcheck: "),
		GetSTUNConn4:        func() netcheck.STUNConn { return c.pconn4 },
		SkipExternalNetwork: inTest(),
		PortMapper:          c.portMapper,
	}

	if c.pconn6 != nil {
		c.netChecker.GetSTUNConn6 = func() netcheck.STUNConn { return c.pconn6 }
	}

	c.ignoreSTUNPackets()

	return c, nil
}

func (c *Conn) Start() {
	c.mu.Lock()
	if c.started {
		panic("duplicate Start call")
	}
	c.started = true
	c.mu.Unlock()

	c.ReSTUN("initial")
}

// ignoreSTUNPackets sets a STUN packet processing func that does nothing.
func (c *Conn) ignoreSTUNPackets() {
	c.stunReceiveFunc.Store(func([]byte, netaddr.IPPort) {})
}

// doPeriodicSTUN is called (in a new goroutine) by
// periodicReSTUNTimer when periodic STUNs are active.
func (c *Conn) doPeriodicSTUN() { c.ReSTUN("periodic") }

func (c *Conn) stopPeriodicReSTUNTimerLocked() {
	if t := c.periodicReSTUNTimer; t != nil {
		t.Stop()
		c.periodicReSTUNTimer = nil
	}
}

// c.mu must NOT be held.
func (c *Conn) updateEndpoints(why string) {
	defer func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		why := c.wantEndpointsUpdate
		c.wantEndpointsUpdate = ""
		if !c.closed {
			if why != "" {
				go c.updateEndpoints(why)
				return
			}
			if c.shouldDoPeriodicReSTUNLocked() {
				// Pick a random duration between 20
				// and 26 seconds (just under 30s, a
				// common UDP NAT timeout on Linux,
				// etc)
				d := tstime.RandomDurationBetween(20*time.Second, 26*time.Second)
				if t := c.periodicReSTUNTimer; t != nil {
					if debugReSTUNStopOnIdle {
						c.logf("resetting existing periodicSTUN to run in %v", d)
					}
					t.Reset(d)
				} else {
					if debugReSTUNStopOnIdle {
						c.logf("scheduling periodicSTUN to run in %v", d)
					}
					c.periodicReSTUNTimer = time.AfterFunc(d, c.doPeriodicSTUN)
				}
			} else {
				if debugReSTUNStopOnIdle {
					c.logf("periodic STUN idle")
				}
				c.stopPeriodicReSTUNTimerLocked()
			}
		}
		c.endpointsUpdateActive = false
		c.muCond.Broadcast()
	}()
	c.logf("[v1] magicsock: starting endpoint update (%s)", why)

	endpoints, err := c.determineEndpoints(c.connCtx)
	if err != nil {
		c.logf("magicsock: endpoint update (%s) failed: %v", why, err)
		// TODO(crawshaw): are there any conditions under which
		// we should trigger a retry based on the error here?
		return
	}

	if c.setEndpoints(endpoints) {
		c.logEndpointChange(endpoints)
		c.epFunc(endpoints)
	}
}

// setEndpoints records the new endpoints, reporting whether they're changed.
// It takes ownership of the slice.
func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) {
	anySTUN := false
	for _, ep := range endpoints {
		if ep.Type == tailcfg.EndpointSTUN {
			anySTUN = true
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if !anySTUN && c.derpMap == nil && !inTest() {
		// Don't bother storing or reporting this yet. We
		// don't have a DERP map or any STUN entries, so we're
		// just starting up. A DERP map should arrive shortly
		// and then we'll have more interesting endpoints to
		// report. This saves a map update.
		// TODO(bradfitz): this optimization is currently
		// skipped during the e2e tests because they depend
		// too much on the exact sequence of updates. Fix the
		// tests. But a protocol rewrite might happen first.
		c.logf("[v1] magicsock: ignoring pre-DERP map, STUN-less endpoint update: %v", endpoints)
		return false
	}

	c.lastEndpointsTime = time.Now()
	for de, fn := range c.onEndpointRefreshed {
		go fn()
		delete(c.onEndpointRefreshed, de)
	}

	if endpointSetsEqual(endpoints, c.lastEndpoints) {
		return false
	}
	c.lastEndpoints = endpoints
	return true
}

// setNetInfoHavePortMap updates NetInfo.HavePortMap to true.
func (c *Conn) setNetInfoHavePortMap() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.netInfoLast == nil {
		// No NetInfo yet. Nothing to update.
		return
	}
	if c.netInfoLast.HavePortMap {
		// No change.
		return
	}
	ni := c.netInfoLast.Clone()
	ni.HavePortMap = true
	c.callNetInfoCallbackLocked(ni)
}

func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) {
	c.mu.Lock()
	dm := c.derpMap
	c.mu.Unlock()

	if dm == nil || c.networkDown() {
		return new(netcheck.Report), nil
	}

	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	c.stunReceiveFunc.Store(c.netChecker.ReceiveSTUNPacket)
	defer c.ignoreSTUNPackets()

	report, err := c.netChecker.GetReport(ctx, dm)
	if err != nil {
		return nil, err
	}

	c.noV4.Set(!report.IPv4)
	c.noV6.Set(!report.IPv6)

	ni := &tailcfg.NetInfo{
		DERPLatency:           map[string]float64{},
		MappingVariesByDestIP: report.MappingVariesByDestIP,
		HairPinning:           report.HairPinning,
		UPnP:                  report.UPnP,
		PMP:                   report.PMP,
		PCP:                   report.PCP,
		HavePortMap:           c.portMapper.HaveMapping(),
	}
	for rid, d := range report.RegionV4Latency {
		ni.DERPLatency[fmt.Sprintf("%d-v4", rid)] = d.Seconds()
	}
	for rid, d := range report.RegionV6Latency {
		ni.DERPLatency[fmt.Sprintf("%d-v6", rid)] = d.Seconds()
	}
	ni.WorkingIPv6.Set(report.IPv6)
	ni.WorkingUDP.Set(report.UDP)
	ni.PreferredDERP = report.PreferredDERP

	if ni.PreferredDERP == 0 {
		// Perhaps UDP is blocked. Pick a deterministic but arbitrary
		// one.
		ni.PreferredDERP = c.pickDERPFallback()
	}
	if !c.setNearestDERP(ni.PreferredDERP) {
		ni.PreferredDERP = 0
	}

	// TODO: set link type

	c.callNetInfoCallback(ni)
	return report, nil
}

var processStartUnixNano = time.Now().UnixNano()

// pickDERPFallback returns a non-zero but deterministic DERP node to
// connect to. This is only used if netcheck couldn't find the
// nearest one (for instance, if UDP is blocked and thus STUN latency
// checks aren't working).
//
// c.mu must NOT be held.
func (c *Conn) pickDERPFallback() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.wantDerpLocked() {
		return 0
	}
	ids := c.derpMap.RegionIDs()
	if len(ids) == 0 {
		// No DERP regions in non-nil map.
		return 0
	}

	// See where our peers are.
	var (
		peersOnDerp = map[int]int{}
		best        int
		bestCount   int
	)
	for _, as := range c.addrsByKey {
		if id := as.derpID(); id != 0 {
			peersOnDerp[id]++
			if v := peersOnDerp[id]; v > bestCount {
				bestCount = v
				best = id
			}
		}
	}

	// If we already had selected something in the past and it has
	// any peers, stay on it. If there are no peers, though, also
	// stay where we are.
	if c.myDerp != 0 && (best == 0 || peersOnDerp[c.myDerp] != 0) {
		return c.myDerp
	}

	// Otherwise pick wherever the most peers are.
	if best != 0 {
		return best
	}

	// Otherwise just pick something randomly.
	h := fnv.New64()
	h.Write([]byte(fmt.Sprintf("%p/%d", c, processStartUnixNano))) // arbitrary
	return ids[rand.New(rand.NewSource(int64(h.Sum64()))).Intn(len(ids))]
}

// callNetInfoCallback calls the NetInfo callback (if previously
// registered with SetNetInfoCallback) if ni has substantially changed
// since the last state.
//
// callNetInfoCallback takes ownership of ni.
//
// c.mu must NOT be held.
func (c *Conn) callNetInfoCallback(ni *tailcfg.NetInfo) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if ni.BasicallyEqual(c.netInfoLast) {
		return
	}
	c.callNetInfoCallbackLocked(ni)
}

func (c *Conn) callNetInfoCallbackLocked(ni *tailcfg.NetInfo) {
	c.netInfoLast = ni
	if c.netInfoFunc != nil {
		c.logf("[v1] magicsock: netInfo update: %+v", ni)
		go c.netInfoFunc(ni)
	}
}

// addValidDiscoPathForTest makes addr a validated disco address for
// discoKey. It's used in tests to enable receiving of packets from
// addr without having to spin up the entire active discovery
// machinery.
func (c *Conn) addValidDiscoPathForTest(discoKey tailcfg.DiscoKey, addr netaddr.IPPort) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.discoOfAddr[addr] = discoKey
}

func (c *Conn) SetNetInfoCallback(fn func(*tailcfg.NetInfo)) {
	if fn == nil {
		panic("nil NetInfoCallback")
	}
	c.mu.Lock()
	last := c.netInfoLast
	c.netInfoFunc = fn
	c.mu.Unlock()

	if last != nil {
		fn(last)
	}
}

// LastRecvActivityOfDisco returns the time we last got traffic from
// this endpoint (updated every ~10 seconds).
func (c *Conn) LastRecvActivityOfDisco(dk tailcfg.DiscoKey) time.Time {
	c.mu.Lock()
	defer c.mu.Unlock()
	de, ok := c.endpointOfDisco[dk]
	if !ok {
		return time.Time{}
	}
	unix := atomic.LoadInt64(&de.lastRecvUnixAtomic)
	if unix == 0 {
		return time.Time{}
	}
	return time.Unix(unix, 0)
}

// Ping handles a "tailscale ping" CLI query.
|
2021-03-23 04:25:43 +00:00
|
|
|
func (c *Conn) Ping(peer *tailcfg.Node, res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
|
2020-08-09 21:49:42 +00:00
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
|
|
|
if c.privateKey.IsZero() {
|
|
|
|
res.Err = "local tailscaled stopped"
|
|
|
|
cb(res)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(peer.Addresses) > 0 {
|
2021-05-15 01:07:28 +00:00
|
|
|
res.NodeIP = peer.Addresses[0].IP().String()
|
2020-08-09 21:49:42 +00:00
|
|
|
}
|
|
|
|
res.NodeName = peer.Name // prefer DNS name
|
|
|
|
if res.NodeName == "" {
|
|
|
|
res.NodeName = peer.Hostinfo.Hostname // else hostname
|
|
|
|
} else {
|
|
|
|
if i := strings.Index(res.NodeName, "."); i != -1 {
|
|
|
|
res.NodeName = res.NodeName[:i]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
dk, ok := c.discoOfNode[peer.Key]
|
2020-09-16 15:54:00 +00:00
|
|
|
if !ok { // peer is using outdated Tailscale version (pre-0.100)
|
|
|
|
res.Err = "no discovery key for peer (pre Tailscale 0.100 version?). Try: ping 100.x.y.z"
|
2020-08-09 21:49:42 +00:00
|
|
|
cb(res)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
de, ok := c.endpointOfDisco[dk]
|
|
|
|
if !ok {
|
|
|
|
c.mu.Unlock() // temporarily release
|
|
|
|
if c.noteRecvActivity != nil {
|
|
|
|
c.noteRecvActivity(dk)
|
|
|
|
}
|
|
|
|
c.mu.Lock() // re-acquire
|
|
|
|
|
|
|
|
// re-check at least basic invariant:
|
|
|
|
if c.privateKey.IsZero() {
|
|
|
|
res.Err = "local tailscaled stopped"
|
|
|
|
cb(res)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
de, ok = c.endpointOfDisco[dk]
|
|
|
|
if !ok {
|
|
|
|
res.Err = "internal error: failed to create endpoint for discokey"
|
|
|
|
cb(res)
|
|
|
|
return
|
|
|
|
}
|
2020-12-21 18:58:06 +00:00
|
|
|
c.logf("[v1] magicsock: started peer %v for ping to %v", dk.ShortString(), peer.Key.ShortString())
|
2020-08-09 21:49:42 +00:00
|
|
|
}
|
|
|
|
de.cliPing(res, cb)
|
|
|
|
}
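
// Ping's endpoint-creation path above temporarily drops c.mu so the
// noteRecvActivity hook can call back into Conn and create the peer,
// then re-acquires the lock and re-checks its invariants before
// continuing. A minimal sketch of that unlock/callback/relock shape
// (the function below is illustrative, not part of the real API):
func exampleUnlockForCallback(mu *sync.Mutex, hook func()) {
	// Caller holds mu on entry.
	mu.Unlock()
	hook() // may call back into code that itself needs mu
	mu.Lock()
	// Caller must re-validate any state read before unlocking, since
	// it may have changed while the lock was released.
}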

// c.mu must be held
func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep netaddr.IPPort) {
	res.LatencySeconds = latency.Seconds()
	if ep.IP() != derpMagicIPAddr {
		res.Endpoint = ep.String()
		return
	}
	regionID := int(ep.Port())
	res.DERPRegionID = regionID
	if c.derpMap != nil {
		if dr, ok := c.derpMap.Regions[regionID]; ok {
			res.DERPRegionCode = dr.RegionCode
		}
	}
}

// DiscoPublicKey returns the discovery public key.
func (c *Conn) DiscoPublicKey() tailcfg.DiscoKey {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.discoPrivate.IsZero() {
		priv := key.NewPrivate()
		c.discoPrivate = priv
		c.discoPublic = tailcfg.DiscoKey(priv.Public())
		c.discoShort = c.discoPublic.ShortString()
		c.logf("magicsock: disco key = %v", c.discoShort)
	}
	return c.discoPublic
}

// PeerHasDiscoKey reports whether peer k supports discovery keys (client version 0.100.0+).
func (c *Conn) PeerHasDiscoKey(k tailcfg.NodeKey) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	_, ok := c.discoOfNode[k]
	return ok
}

// c.mu must NOT be held.
func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.wantDerpLocked() {
		c.myDerp = 0
		health.SetMagicSockDERPHome(0)
		return false
	}
	if derpNum == c.myDerp {
		// No change.
		return true
	}
	c.myDerp = derpNum
	health.SetMagicSockDERPHome(derpNum)

	if c.privateKey.IsZero() {
		// No private key yet, so DERP connections won't come up anyway.
		// Return early rather than ultimately log a couple lines of noise.
		return true
	}

	// On change, notify all currently connected DERP servers and
	// start connecting to our home DERP if we are not already.
	dr := c.derpMap.Regions[derpNum]
	if dr == nil {
		c.logf("[unexpected] magicsock: derpMap.Regions[%v] is nil", derpNum)
	} else {
		c.logf("magicsock: home is now derp-%v (%v)", derpNum, c.derpMap.Regions[derpNum].RegionCode)
	}
	for i, ad := range c.activeDerp {
		go ad.c.NotePreferred(i == c.myDerp)
	}
	c.goDerpConnect(derpNum)
	return true
}

// startDerpHomeConnectLocked starts connecting to our DERP home, if any.
//
// c.mu must be held.
func (c *Conn) startDerpHomeConnectLocked() {
	c.goDerpConnect(c.myDerp)
}

// goDerpConnect starts a goroutine to start connecting to the given
// DERP node.
//
// c.mu may be held, but does not need to be.
func (c *Conn) goDerpConnect(node int) {
	if node == 0 {
		return
	}
	go c.derpWriteChanOfAddr(netaddr.IPPortFrom(derpMagicIPAddr, uint16(node)), key.Public{})
}
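
// DERP "addresses" in this package are fake UDP addresses: the IP is
// always derpMagicIPAddr and the port carries the DERP region ID, as
// goDerpConnect does above. These two helpers are an illustrative
// sketch of that encoding (they are not part of the real API; callers
// in this file inline the same logic).
func exampleDerpRegionToFakeAddr(regionID int) netaddr.IPPort {
	return netaddr.IPPortFrom(derpMagicIPAddr, uint16(regionID))
}

func exampleDerpRegionFromFakeAddr(addr netaddr.IPPort) (regionID int, ok bool) {
	if addr.IP() != derpMagicIPAddr {
		return 0, false // a real UDP address, not a DERP one
	}
	return int(addr.Port()), true
}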

// determineEndpoints returns the machine's endpoint addresses. It
// does a STUN lookup (via netcheck) to determine its public address.
//
// c.mu must NOT be held.
func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) {
	portmapExt, havePortmap := c.portMapper.GetCachedMappingOrStartCreatingOne()

	nr, err := c.updateNetInfo(ctx)
	if err != nil {
		c.logf("magicsock.Conn.determineEndpoints: updateNetInfo: %v", err)
		return nil, err
	}

	already := make(map[netaddr.IPPort]tailcfg.EndpointType) // endpoint -> how it was found
	var eps []tailcfg.Endpoint                               // unique endpoints

	ipp := func(s string) (ipp netaddr.IPPort) {
		ipp, _ = netaddr.ParseIPPort(s)
		return
	}
	addAddr := func(ipp netaddr.IPPort, et tailcfg.EndpointType) {
		if ipp.IsZero() || (debugOmitLocalAddresses && et == tailcfg.EndpointLocal) {
			return
		}
		if _, ok := already[ipp]; !ok {
			already[ipp] = et
			eps = append(eps, tailcfg.Endpoint{Addr: ipp, Type: et})
		}
	}

	// If we didn't have a portmap earlier, maybe it's done by now.
	if !havePortmap {
		portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne()
	}
	if havePortmap {
		addAddr(portmapExt, tailcfg.EndpointPortmapped)
		c.setNetInfoHavePortMap()
	}

	if nr.GlobalV4 != "" {
		addAddr(ipp(nr.GlobalV4), tailcfg.EndpointSTUN)

		// If they're behind a hard NAT and are using a fixed
		// port locally, assume they might've added a static
		// port mapping on their router to the same explicit
		// port that tailscaled is running with. Worst case
		// it's an invalid candidate mapping.
		if port := c.port.Get(); nr.MappingVariesByDestIP.EqualBool(true) && port != 0 {
			if ip, _, err := net.SplitHostPort(nr.GlobalV4); err == nil {
				addAddr(ipp(net.JoinHostPort(ip, strconv.Itoa(int(port)))), tailcfg.EndpointSTUN4LocalPort)
			}
		}
	}
	if nr.GlobalV6 != "" {
		addAddr(ipp(nr.GlobalV6), tailcfg.EndpointSTUN)
	}

	c.ignoreSTUNPackets()

	if localAddr := c.pconn4.LocalAddr(); localAddr.IP.IsUnspecified() {
		ips, loopback, err := interfaces.LocalAddresses()
		if err != nil {
			return nil, err
		}
		if len(ips) == 0 && len(eps) == 0 {
			// Only include loopback addresses if we have no
			// interfaces at all to use as endpoints and don't
			// have a public IPv4 or IPv6 address. This allows
			// for localhost testing when you're on a plane and
			// offline, for example.
			ips = loopback
		}
		for _, ip := range ips {
			addAddr(netaddr.IPPortFrom(ip, uint16(localAddr.Port)), tailcfg.EndpointLocal)
		}
	} else {
		// Our local endpoint is bound to a particular address.
		// Do not offer addresses on other local interfaces.
		addAddr(ipp(localAddr.String()), tailcfg.EndpointLocal)
	}

	// Note: the endpoints are intentionally returned in priority order,
	// from "farthest but most reliable" to "closest but least
	// reliable." Addresses returned from STUN should be globally
	// addressable, but might go farther on the network than necessary.
	// Local interface addresses might have lower latency, but not be
	// globally addressable.
	//
	// The STUN address(es) are always first so that legacy wireguard
	// can use eps[0] as its only known endpoint address (although that's
	// obviously non-ideal).
	//
	// Despite this sorting, though, clients since 0.100 haven't relied
	// on the sorting order for any decisions.
	return eps, nil
}
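
// determineEndpoints keeps the first EndpointType it sees for a given
// address and appends endpoints in priority order (portmapped, then
// STUN, then local). A minimal standalone sketch of that
// dedup-preserving-order behavior, using hypothetical sample
// addresses (exampleEndpointDedup is illustrative only):
func exampleEndpointDedup() []tailcfg.Endpoint {
	already := make(map[netaddr.IPPort]tailcfg.EndpointType)
	var eps []tailcfg.Endpoint
	add := func(ipp netaddr.IPPort, et tailcfg.EndpointType) {
		if _, ok := already[ipp]; !ok {
			already[ipp] = et
			eps = append(eps, tailcfg.Endpoint{Addr: ipp, Type: et})
		}
	}
	stun := netaddr.MustParseIPPort("203.0.113.7:41641")   // example public address
	local := netaddr.MustParseIPPort("192.168.1.10:41641") // example LAN address
	add(stun, tailcfg.EndpointSTUN)
	add(stun, tailcfg.EndpointLocal) // same address again: ignored, the STUN type wins
	add(local, tailcfg.EndpointLocal)
	return eps // STUN endpoint first, then the local one
}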

// endpointSetsEqual reports whether x and y represent the same set of
// endpoints. The order doesn't matter.
//
// It does not mutate the slices.
func endpointSetsEqual(x, y []tailcfg.Endpoint) bool {
	if len(x) == len(y) {
		orderMatches := true
		for i := range x {
			if x[i] != y[i] {
				orderMatches = false
				break
			}
		}
		if orderMatches {
			return true
		}
	}
	m := map[tailcfg.Endpoint]int{}
	for _, v := range x {
		m[v] |= 1
	}
	for _, v := range y {
		m[v] |= 2
	}
	for _, n := range m {
		if n != 3 {
			return false
		}
	}
	return true
}
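
// A quick illustration of endpointSetsEqual's order-insensitivity: the
// two slices below hold the same endpoints in different orders, so it
// reports true, while dropping an element makes it report false. The
// addresses are arbitrary examples.
func exampleEndpointSetsEqualUsage() (sameSet, differentSet bool) {
	a := tailcfg.Endpoint{Addr: netaddr.MustParseIPPort("203.0.113.7:41641"), Type: tailcfg.EndpointSTUN}
	b := tailcfg.Endpoint{Addr: netaddr.MustParseIPPort("192.168.1.10:41641"), Type: tailcfg.EndpointLocal}
	sameSet = endpointSetsEqual([]tailcfg.Endpoint{a, b}, []tailcfg.Endpoint{b, a})   // true
	differentSet = endpointSetsEqual([]tailcfg.Endpoint{a, b}, []tailcfg.Endpoint{a}) // false
	return sameSet, differentSet
}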

// LocalPort returns the current IPv4 listener's port number.
func (c *Conn) LocalPort() uint16 {
	laddr := c.pconn4.LocalAddr()
	return uint16(laddr.Port)
}

var errNetworkDown = errors.New("magicsock: network down")

func (c *Conn) networkDown() bool { return !c.networkUp.Get() }

func (c *Conn) Send(b []byte, ep conn.Endpoint) error {
	if c.networkDown() {
		return errNetworkDown
	}

	switch v := ep.(type) {
	default:
		panic(fmt.Sprintf("[unexpected] Endpoint type %T", v))
	case *discoEndpoint:
		return v.send(b)
	case *addrSet:
		return c.sendAddrSet(b, v)
	}
}

var errConnClosed = errors.New("Conn closed")

var errDropDerpPacket = errors.New("too many DERP packets queued; dropping")

var udpAddrPool = &sync.Pool{
	New: func() interface{} { return new(net.UDPAddr) },
}

// sendUDP sends UDP packet b to ipp.
// See sendAddr's docs on the return value meanings.
func (c *Conn) sendUDP(ipp netaddr.IPPort, b []byte) (sent bool, err error) {
	ua := udpAddrPool.Get().(*net.UDPAddr)
	defer udpAddrPool.Put(ua)
	return c.sendUDPStd(ipp.UDPAddrAt(ua), b)
}

// sendUDPStd sends UDP packet b to addr.
// See sendAddr's docs on the return value meanings.
func (c *Conn) sendUDPStd(addr *net.UDPAddr, b []byte) (sent bool, err error) {
	switch {
	case addr.IP.To4() != nil:
		_, err = c.pconn4.WriteTo(b, addr)
		if err != nil && c.noV4.Get() {
			return false, nil
		}
	case len(addr.IP) == net.IPv6len:
		if c.pconn6 == nil {
			// ignore IPv6 dest if we don't have an IPv6 address.
			return false, nil
		}
		_, err = c.pconn6.WriteTo(b, addr)
		if err != nil && c.noV6.Get() {
			return false, nil
		}
	default:
		panic("bogus sendUDPStd addr type")
	}
	return err == nil, err
}

// sendAddr sends packet b to addr, which is either a real UDP address
// or a fake UDP address representing a DERP server (see derpmap.go).
// The provided public key identifies the recipient.
//
// The returned err is whether there was an error writing when it
// should've worked.
// The returned sent is whether a packet went out at all.
// An example of when they might be different: sending to an
// IPv6 address when the local machine doesn't have IPv6 support
// returns (false, nil); it's not an error, but nothing was sent.
func (c *Conn) sendAddr(addr netaddr.IPPort, pubKey key.Public, b []byte) (sent bool, err error) {
	if addr.IP() != derpMagicIPAddr {
		return c.sendUDP(addr, b)
	}

	ch := c.derpWriteChanOfAddr(addr, pubKey)
	if ch == nil {
		return false, nil
	}

	// TODO(bradfitz): this makes garbage for now; we could use a
	// buffer pool later. Previously we passed ownership of this
	// to derpWriteRequest and waited for derphttp.Client.Send to
	// complete, but that's too slow while holding wireguard-go
	// internal locks.
	pkt := make([]byte, len(b))
	copy(pkt, b)

	select {
	case <-c.donec:
		return false, errConnClosed
	case ch <- derpWriteRequest{addr, pubKey, pkt}:
		return true, nil
	default:
		// Too many writes queued. Drop packet.
		return false, errDropDerpPacket
	}
}
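
// sendAddr's DERP path above uses a non-blocking select so that a slow
// or wedged DERP connection sheds packets instead of stalling
// wireguard-go. A generic sketch of that enqueue-or-drop pattern
// follows; the queue, done channel, and errors here are illustrative,
// not the real ones used above.
func exampleEnqueueOrDrop(queue chan<- []byte, done <-chan struct{}, pkt []byte) (sent bool, err error) {
	select {
	case <-done:
		return false, errors.New("connection closed")
	case queue <- pkt:
		return true, nil
	default:
		// Queue full: drop rather than block the caller.
		return false, errors.New("queue full; packet dropped")
	}
}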

// bufferedDerpWritesBeforeDrop is how many packet writes can be
// queued up to the DERP client to write on the wire before we start
// dropping.
//
// TODO: this is currently arbitrary. Figure out something better?
const bufferedDerpWritesBeforeDrop = 32

// derpWriteChanOfAddr returns a DERP client for fake UDP addresses that
// represent DERP servers, creating them as necessary. For real UDP
// addresses, it returns nil.
//
// If peer is non-zero, it can be used to find an active reverse
// path, without using addr.
func (c *Conn) derpWriteChanOfAddr(addr netaddr.IPPort, peer key.Public) chan<- derpWriteRequest {
	if addr.IP() != derpMagicIPAddr {
		return nil
	}
	regionID := int(addr.Port())

	if c.networkDown() {
		return nil
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.wantDerpLocked() || c.closed {
		return nil
	}
	if c.privateKey.IsZero() {
		c.logf("magicsock: DERP lookup of %v with no private key; ignoring", addr)
		return nil
	}

	// See if we have a connection open to that DERP node ID
	// first. If so, might as well use it. (It's a little
	// arbitrary whether we use this one vs. the reverse route
	// below when we have both.)
	ad, ok := c.activeDerp[regionID]
	if ok {
		*ad.lastWrite = time.Now()
		c.setPeerLastDerpLocked(peer, regionID, regionID)
		return ad.writeCh
	}

	// If we don't have an open connection to the peer's home DERP
	// node, see if we have an open connection to a DERP node
	// where we'd heard from that peer already. For instance,
	// perhaps peer's home is Frankfurt, but they dialed our home DERP
	// node in SF to reach us, so we can reply to them using our
	// SF connection rather than dialing Frankfurt. (Issue 150)
	if !peer.IsZero() && useDerpRoute() {
		if r, ok := c.derpRoute[peer]; ok {
			if ad, ok := c.activeDerp[r.derpID]; ok && ad.c == r.dc {
				c.setPeerLastDerpLocked(peer, r.derpID, regionID)
				*ad.lastWrite = time.Now()
				return ad.writeCh
			}
		}
	}

	why := "home-keep-alive"
	if !peer.IsZero() {
		why = peerShort(peer)
	}
	c.logf("magicsock: adding connection to derp-%v for %v", regionID, why)

	firstDerp := false
	if c.activeDerp == nil {
		firstDerp = true
		c.activeDerp = make(map[int]activeDerp)
		c.prevDerp = make(map[int]*syncs.WaitGroupChan)
	}
	if c.derpMap == nil || c.derpMap.Regions[regionID] == nil {
		return nil
	}

	// Note that derphttp.NewRegionClient does not dial the server
	// so it is safe to do under the mu lock.
	dc := derphttp.NewRegionClient(c.privateKey, c.logf, func() *tailcfg.DERPRegion {
		if c.connCtx.Err() != nil {
			// If we're closing, don't try to acquire the lock.
			// We might already be in Conn.Close and the Lock would deadlock.
			return nil
		}
		c.mu.Lock()
		defer c.mu.Unlock()
		if c.derpMap == nil {
			return nil
		}
		return c.derpMap.Regions[regionID]
	})

	dc.SetCanAckPings(true)
	dc.NotePreferred(c.myDerp == regionID)
	dc.DNSCache = dnscache.Get()

	ctx, cancel := context.WithCancel(c.connCtx)
	ch := make(chan derpWriteRequest, bufferedDerpWritesBeforeDrop)

	ad.c = dc
	ad.writeCh = ch
	ad.cancel = cancel
	ad.lastWrite = new(time.Time)
	*ad.lastWrite = time.Now()
	ad.createTime = time.Now()
	c.activeDerp[regionID] = ad
	c.logActiveDerpLocked()
	c.setPeerLastDerpLocked(peer, regionID, regionID)
	c.scheduleCleanStaleDerpLocked()

	// Build a startGate for the derp reader+writer
	// goroutines, so they don't start running until any
	// previous generation is closed.
	startGate := syncs.ClosedChan()
	if prev := c.prevDerp[regionID]; prev != nil {
		startGate = prev.DoneChan()
	}
	// And register a WaitGroup(Chan) for this generation.
	wg := syncs.NewWaitGroupChan()
	wg.Add(2)
	c.prevDerp[regionID] = wg

	if firstDerp {
		startGate = c.derpStarted
		go func() {
			dc.Connect(ctx)
			close(c.derpStarted)
			c.muCond.Broadcast()
		}()
	}

	go c.runDerpReader(ctx, addr, dc, wg, startGate)
	go c.runDerpWriter(ctx, dc, ch, wg, startGate)
	go c.derpActiveFunc()

	return ad.writeCh
}
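
// The startGate/prevDerp dance above makes each new reader/writer
// generation for a region wait until the previous generation has fully
// exited before it starts. A minimal channel-only sketch of that
// handoff (using a plain channel instead of syncs.WaitGroupChan; the
// function is illustrative, not part of the real API):
func exampleGenerationGate(prevDone <-chan struct{}, work func()) (thisDone chan struct{}) {
	thisDone = make(chan struct{})
	go func() {
		defer close(thisDone) // signals the *next* generation when we exit
		if prevDone != nil {
			<-prevDone // wait for the previous generation to finish
		}
		work()
	}()
	return thisDone
}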

// setPeerLastDerpLocked notes that peer is now being written to via
// the provided DERP regionID, and that the peer advertises a DERP
// home region ID of homeID.
//
// If there's any change, it logs.
//
// c.mu must be held.
func (c *Conn) setPeerLastDerpLocked(peer key.Public, regionID, homeID int) {
	if peer.IsZero() {
		return
	}
	old := c.peerLastDerp[peer]
	if old == regionID {
		return
	}
	c.peerLastDerp[peer] = regionID

	var newDesc string
	switch {
	case regionID == homeID && regionID == c.myDerp:
		newDesc = "shared home"
	case regionID == homeID:
		newDesc = "their home"
	case regionID == c.myDerp:
		newDesc = "our home"
	case regionID != homeID:
		newDesc = "alt"
	}
	if old == 0 {
		c.logf("[v1] magicsock: derp route for %s set to derp-%d (%s)", peerShort(peer), regionID, newDesc)
	} else {
		c.logf("[v1] magicsock: derp route for %s changed from derp-%d => derp-%d (%s)", peerShort(peer), old, regionID, newDesc)
	}
}

// derpReadResult is the type sent by runDerpReader to ReceiveIPv4
// when a DERP packet is available.
//
// Notably, it doesn't include the derp.ReceivedPacket because we
// don't want to give the receiver access to the aliased []byte. To
// get at the packet contents they need to call copyBuf to copy it
// out, which also releases the buffer.
type derpReadResult struct {
	regionID int
	n        int        // length of data received
	src      key.Public // may be zero until server deployment if v2+
	// copyBuf is called to copy the data to dst. It returns how
	// much data was copied, which will be n if dst is large
	// enough. copyBuf can only be called once.
	// If copyBuf is nil, that's a signal from the sender to ignore
	// this message.
	copyBuf func(dst []byte) int
}
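
// copyBuf implements a small ownership handshake: the DERP reader
// goroutine keeps the packet buffer until the consumer has copied it
// out, and the copy function signals a channel so the reader may reuse
// the buffer. A stripped-down sketch of that protocol (the function
// and channel names are illustrative):
func exampleCopyBufHandshake(data []byte) (res derpReadResult, bufFree <-chan struct{}) {
	didCopy := make(chan struct{}, 1)
	res = derpReadResult{
		n: len(data),
		copyBuf: func(dst []byte) int {
			n := copy(dst, data)
			didCopy <- struct{}{} // tell the producer the buffer is free again
			return n
		},
	}
	return res, didCopy
}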

// runDerpReader runs in a goroutine for the life of a DERP
// connection, handling received packets.
func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netaddr.IPPort, dc *derphttp.Client, wg *syncs.WaitGroupChan, startGate <-chan struct{}) {
	defer wg.Decr()
	defer dc.Close()

	select {
	case <-startGate:
	case <-ctx.Done():
		return
	}

	didCopy := make(chan struct{}, 1)
	regionID := int(derpFakeAddr.Port())
	res := derpReadResult{regionID: regionID}
	var pkt derp.ReceivedPacket
	res.copyBuf = func(dst []byte) int {
		n := copy(dst, pkt.Data)
		didCopy <- struct{}{}
		return n
	}

	defer health.SetDERPRegionConnectedState(regionID, false)

	// peerPresent is the set of senders we know are present on this
	// connection, based on messages we've received from the server.
	peerPresent := map[key.Public]bool{}
	bo := backoff.NewBackoff(fmt.Sprintf("derp-%d", regionID), c.logf, 5*time.Second)
	var lastPacketTime time.Time

	for {
		msg, connGen, err := dc.RecvDetail()
		if err != nil {
			health.SetDERPRegionConnectedState(regionID, false)
			// Forget that all these peers have routes.
			for peer := range peerPresent {
				delete(peerPresent, peer)
				c.removeDerpPeerRoute(peer, regionID, dc)
			}
			if err == derphttp.ErrClientClosed {
				return
			}
			if c.networkDown() {
				c.logf("[v1] magicsock: derp.Recv(derp-%d): network down, closing", regionID)
				return
			}
			select {
			case <-ctx.Done():
				return
			default:
			}

			c.logf("magicsock: [%p] derp.Recv(derp-%d): %v", dc, regionID, err)

			// If our DERP connection broke, it might be because our network
			// conditions changed. Start that check.
			c.ReSTUN("derp-recv-error")

			// Back off a bit before reconnecting.
			bo.BackOff(ctx, err)
			select {
			case <-ctx.Done():
				return
			default:
			}
			continue
		}
		bo.BackOff(ctx, nil) // reset

		now := time.Now()
		if lastPacketTime.IsZero() || now.Sub(lastPacketTime) > 5*time.Second {
			health.NoteDERPRegionReceivedFrame(regionID)
			lastPacketTime = now
		}

		switch m := msg.(type) {
		case derp.ServerInfoMessage:
			health.SetDERPRegionConnectedState(regionID, true)
			c.logf("magicsock: derp-%d connected; connGen=%v", regionID, connGen)
			continue
		case derp.ReceivedPacket:
			pkt = m
			res.n = len(m.Data)
			res.src = m.Source
			if logDerpVerbose {
				c.logf("magicsock: got derp-%v packet: %q", regionID, m.Data)
			}
			// If this is a new sender we hadn't seen before, remember it and
			// register a route for this peer.
			if _, ok := peerPresent[m.Source]; !ok {
				peerPresent[m.Source] = true
				c.addDerpPeerRoute(m.Source, regionID, dc)
			}
		case derp.PingMessage:
			// Best effort reply to the ping.
			pingData := [8]byte(m)
			go func() {
				if err := dc.SendPong(pingData); err != nil {
					c.logf("magicsock: derp-%d SendPong error: %v", regionID, err)
				}
			}()
			continue
		default:
			// Ignore.
			continue
		}

		select {
		case <-ctx.Done():
			return
		case c.derpRecvCh <- res:
		}

		select {
		case <-ctx.Done():
			return
		case <-didCopy:
			continue
		}
	}
}

type derpWriteRequest struct {
	addr   netaddr.IPPort
	pubKey key.Public
	b      []byte // copied; ownership passed to receiver
}

// runDerpWriter runs in a goroutine for the life of a DERP
// connection, sending packets queued on ch to the DERP server.
func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan derpWriteRequest, wg *syncs.WaitGroupChan, startGate <-chan struct{}) {
	defer wg.Decr()
	select {
	case <-startGate:
	case <-ctx.Done():
		return
	}

	for {
		select {
		case <-ctx.Done():
			return
		case wr := <-ch:
			err := dc.Send(wr.pubKey, wr.b)
			if err != nil {
				c.logf("magicsock: derp.Send(%v): %v", wr.addr, err)
			}
		}
	}
}

// findEndpoint maps from a UDP address to a WireGuard endpoint, for
// ReceiveIPv4/ReceiveIPv6.
//
// TODO(bradfitz): add a fast path that returns nil here for normal
// wireguard-go transport packets; wireguard-go only uses this
// Endpoint for the relatively rare non-data packets; but we need the
// Endpoint to find the UDPAddr to return to wireguard anyway, so no
// benefit unless we can, say, always return the same fake UDPAddr for
// all packets.
func (c *Conn) findEndpoint(ipp netaddr.IPPort, packet []byte) conn.Endpoint {
	c.mu.Lock()
	defer c.mu.Unlock()

	// See if they have a discoEndpoint, for a set of peers
	// both supporting active discovery.
	if dk, ok := c.discoOfAddr[ipp]; ok {
		if ep, ok := c.endpointOfDisco[dk]; ok {
			return ep
		}
	}

	return c.findLegacyEndpointLocked(ipp, packet)
}

// noteRecvActivityFromEndpoint calls the c.noteRecvActivity hook if
// e is a discovery-capable peer and this is the first receive activity
// it's got in awhile (in last 10 seconds).
//
// This should be called whenever a packet arrives from e.
func (c *Conn) noteRecvActivityFromEndpoint(e conn.Endpoint) {
	de, ok := e.(*discoEndpoint)
	if ok && c.noteRecvActivity != nil && de.isFirstRecvActivityInAwhile() {
		c.noteRecvActivity(de.discoKey)
	}
}

// receiveIPv6 receives a UDP IPv6 packet. It is called by wireguard-go.
func (c *Conn) receiveIPv6(b []byte) (int, conn.Endpoint, error) {
	health.ReceiveIPv6.Enter()
	defer health.ReceiveIPv6.Exit()
	for {
		n, ipp, err := c.pconn6.ReadFromNetaddr(b)
		if err != nil {
			return 0, nil, err
		}
		if ep, ok := c.receiveIP(b[:n], ipp, &c.ippEndpoint6); ok {
			return n, ep, nil
		}
	}
}

// receiveIPv4 receives a UDP IPv4 packet. It is called by wireguard-go.
func (c *Conn) receiveIPv4(b []byte) (n int, ep conn.Endpoint, err error) {
	health.ReceiveIPv4.Enter()
	defer health.ReceiveIPv4.Exit()
	for {
		n, ipp, err := c.pconn4.ReadFromNetaddr(b)
		if err != nil {
			return 0, nil, err
		}
		if ep, ok := c.receiveIP(b[:n], ipp, &c.ippEndpoint4); ok {
			return n, ep, nil
		}
	}
}

// receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6.
//
// ok is whether this read should be reported up to wireguard-go (our
// caller).
func (c *Conn) receiveIP(b []byte, ipp netaddr.IPPort, cache *ippEndpointCache) (ep conn.Endpoint, ok bool) {
	if stun.Is(b) {
		c.stunReceiveFunc.Load().(func([]byte, netaddr.IPPort))(b, ipp)
		return nil, false
	}
	if c.handleDiscoMessage(b, ipp) {
		return nil, false
	}
	if !c.havePrivateKey.Get() {
		// If we have no private key, we're logged out or
		// stopped. Don't try to pass these wireguard packets
		// up to wireguard-go; it'll just complain (issue 1167).
		return nil, false
	}
	if cache.ipp == ipp && cache.de != nil && cache.gen == cache.de.numStopAndReset() {
		ep = cache.de
	} else {
		ep = c.findEndpoint(ipp, b)
		if ep == nil {
			return nil, false
		}
		if de, ok := ep.(*discoEndpoint); ok {
			cache.ipp = ipp
			cache.de = de
			cache.gen = de.numStopAndReset()
		}
	}
	c.noteRecvActivityFromEndpoint(ep)
	return ep, true
}
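
// The cache argument above is a single-entry (address -> endpoint)
// cache, invalidated by comparing a generation counter that the
// endpoint bumps whenever its timers are stopped and reset. An
// illustrative sketch of that validity check (the type and function
// below are hypothetical, not the real ippEndpointCache):
type exampleEndpointCacheEntry struct {
	ipp netaddr.IPPort
	gen int64
}

func exampleCacheEntryValid(e exampleEndpointCacheEntry, ipp netaddr.IPPort, currentGen int64) bool {
	// Valid only if it's the same remote address and the endpoint has
	// not been stopped/reset since the entry was written.
	return e.ipp == ipp && e.gen == currentGen
}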

// receiveDERP reads a packet from c.derpRecvCh into b and returns the associated endpoint.
// It is called by wireguard-go.
//
// If the packet was a disco message or the peer endpoint wasn't
// found, receiveDERP waits for the next packet instead of reporting
// anything to wireguard-go.
func (c *connBind) receiveDERP(b []byte) (n int, ep conn.Endpoint, err error) {
	health.ReceiveDERP.Enter()
	defer health.ReceiveDERP.Exit()
	for dm := range c.derpRecvCh {
		if c.Closed() {
			break
		}
		n, ep := c.processDERPReadResult(dm, b)
		if n == 0 {
			// No data read occurred. Wait for another packet.
			continue
		}
		return n, ep, nil
	}
	return 0, nil, net.ErrClosed
}

func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep conn.Endpoint) {
	if dm.copyBuf == nil {
		return 0, nil
	}
	var regionID int
	n, regionID = dm.n, dm.regionID
	ncopy := dm.copyBuf(b)
	if ncopy != n {
		err := fmt.Errorf("received DERP packet of length %d that's too big for WireGuard buf size %d", n, ncopy)
		c.logf("magicsock: %v", err)
		return 0, nil
	}

	ipp := netaddr.IPPortFrom(derpMagicIPAddr, uint16(regionID))
	if c.handleDiscoMessage(b[:n], ipp) {
		return 0, nil
	}

	var (
		didNoteRecvActivity bool
		discoEp             *discoEndpoint
		asEp                *addrSet
	)
	c.mu.Lock()
	if dk, ok := c.discoOfNode[tailcfg.NodeKey(dm.src)]; ok {
		discoEp = c.endpointOfDisco[dk]
		// If we know about the node (it's in discoOfNode) but don't know about the
		// endpoint, that's because it's an idle peer that doesn't yet exist in the
		// wireguard config. So run the receive hook, if defined, which should
		// create the wireguard peer.
		if discoEp == nil && c.noteRecvActivity != nil {
			didNoteRecvActivity = true
			c.mu.Unlock()          // release lock before calling noteRecvActivity
			c.noteRecvActivity(dk) // (calls back into ParseEndpoint)
			// Now reacquire the lock. No invariants need to be rechecked; just
			// 1-2 map lookups follow that are harmless if, say, the peer has
			// been deleted during this time.
			c.mu.Lock()

			discoEp = c.endpointOfDisco[dk]
			c.logf("magicsock: DERP packet received from idle peer %v; created=%v", dm.src.ShortString(), discoEp != nil)
		}
	}
	if !c.disableLegacy {
		asEp = c.addrsByKey[dm.src]
	}
	c.mu.Unlock()

	if discoEp != nil {
		ep = discoEp
	} else if asEp != nil {
		ep = asEp
	} else {
		key := wgkey.Key(dm.src)
		c.logf("magicsock: DERP packet from unknown key: %s", key.ShortString())
		ep = c.findEndpoint(ipp, b[:n])
		if ep == nil {
			return 0, nil
		}
	}

	if !didNoteRecvActivity {
		c.noteRecvActivityFromEndpoint(ep)
	}
	return n, ep
}
|
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
// discoLogLevel controls the verbosity of discovery log messages.
type discoLogLevel int

const (
	// discoLog means that a message should be logged.
	discoLog discoLogLevel = iota

	// discoVerboseLog means that a message should only be logged
	// in TS_DEBUG_DISCO mode.
	discoVerboseLog
)

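// sendDiscoMessage sends the discovery message m to the peer identified by
// dstKey/dstDisco at dst. The packet is disco.Magic, our disco public key,
// a random nonce, and the NaCl-box-sealed message. It reports whether the
// packet was handed off to a sender.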
func (c *Conn) sendDiscoMessage(dst netaddr.IPPort, dstKey tailcfg.NodeKey, dstDisco tailcfg.DiscoKey, m disco.Message, logLevel discoLogLevel) (sent bool, err error) {
	c.mu.Lock()
	if c.closed {
		c.mu.Unlock()
		return false, errConnClosed
	}
	var nonce [disco.NonceLen]byte
	if _, err := crand.Read(nonce[:]); err != nil {
		panic(err) // worth dying for
	}
	pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters.
	pkt = append(pkt, disco.Magic...)
	pkt = append(pkt, c.discoPublic[:]...)
	pkt = append(pkt, nonce[:]...)
	sharedKey := c.sharedDiscoKeyLocked(dstDisco)
	c.mu.Unlock()

	pkt = box.SealAfterPrecomputation(pkt, m.AppendMarshal(nil), &nonce, sharedKey)
	sent, err = c.sendAddr(dst, key.Public(dstKey), pkt)
	if sent {
		if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco) {
			c.logf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v", c.discoShort, dstDisco.ShortString(), dstKey.ShortString(), derpStr(dst.String()), disco.MessageSummary(m))
		}
	} else if err == nil {
		// Can't send. (e.g. no IPv6 locally)
	} else {
		if !c.networkDown() {
			c.logf("magicsock: disco: failed to send %T to %v: %v", m, dst, err)
		}
	}
	return sent, err
}

// handleDiscoMessage handles a discovery message and reports whether
// msg was a Tailscale inter-node discovery message.
//
// A discovery message has the form:
//
//  * magic [6]byte
//  * senderDiscoPubKey [32]byte
//  * nonce [24]byte
//  * naclbox of payload (see tailscale.com/disco package for inner payload format)
//
// For messages received over DERP, the addr will be derpMagicIP (with
// the port being the region ID).
func (c *Conn) handleDiscoMessage(msg []byte, src netaddr.IPPort) (isDiscoMsg bool) {
	const headerLen = len(disco.Magic) + len(tailcfg.DiscoKey{}) + disco.NonceLen
	if len(msg) < headerLen || string(msg[:len(disco.Magic)]) != disco.Magic {
		return false
	}

	// If the first four bytes are the prefix of disco.Magic
	// (0x5453f09f) then it's definitely not a valid WireGuard
	// packet (which starts with little-endian uint32 1, 2, 3, 4).
	// Use naked returns for all following paths.
	isDiscoMsg = true

	var sender tailcfg.DiscoKey
	copy(sender[:], msg[len(disco.Magic):])

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return
	}
	if debugDisco {
		c.logf("magicsock: disco: got disco-looking frame from %v", sender.ShortString())
	}
	if c.privateKey.IsZero() {
		// Ignore disco messages when we're stopped.
		// Still return true, to not pass it down to wireguard.
		return
	}
	if c.discoPrivate.IsZero() {
		if debugDisco {
			c.logf("magicsock: disco: ignoring disco-looking frame, no local key")
		}
		return
	}

	peerNode, ok := c.nodeOfDisco[sender]
	if !ok {
		if debugDisco {
			c.logf("magicsock: disco: ignoring disco-looking frame, don't know node for %v", sender.ShortString())
		}
		return
	}

	needsRecvActivityCall := false
	de, endpointFound0 := c.endpointOfDisco[sender]
	if !endpointFound0 {
		// We don't have an active endpoint for this sender but we knew about the node, so
		// it's an idle endpoint that doesn't yet exist in the wireguard config. We now have
		// to notify the userspace engine (via noteRecvActivity) so wireguard-go can create
		// an Endpoint (ultimately calling our ParseEndpoint).
		c.logf("magicsock: got disco message from idle peer, starting lazy conf for %v, %v", peerNode.Key.ShortString(), sender.ShortString())
		if c.noteRecvActivity == nil {
			c.logf("magicsock: [unexpected] have node without endpoint, without c.noteRecvActivity hook")
			return
		}
		needsRecvActivityCall = true
	} else {
		needsRecvActivityCall = de.isFirstRecvActivityInAwhile()
	}
	if needsRecvActivityCall && c.noteRecvActivity != nil {
		// We can't hold Conn.mu while calling noteRecvActivity.
		// noteRecvActivity acquires userspaceEngine.wgLock (and per our
		// lock ordering rules: wgLock must come first), and also calls
		// back into our Conn.ParseEndpoint, which would double-acquire
		// Conn.mu.
		c.mu.Unlock()
		c.noteRecvActivity(sender)
		c.mu.Lock() // re-acquire

		// Now, recheck invariants that might've changed while we'd
		// released the lock, which isn't much:
		if c.closed || c.privateKey.IsZero() {
			return
		}
		de, ok = c.endpointOfDisco[sender]
		if !ok {
			if _, ok := c.nodeOfDisco[sender]; !ok {
				// They just disappeared while we'd released the lock.
				return false
			}
			c.logf("magicsock: [unexpected] lazy endpoint not created for %v, %v", peerNode.Key.ShortString(), sender.ShortString())
			return
		}
		if !endpointFound0 {
			c.logf("magicsock: lazy endpoint created via disco message for %v, %v", peerNode.Key.ShortString(), sender.ShortString())
		}
	}

	// First, do we even know (and thus care) about this sender? If not,
	// don't bother decrypting it.

	var nonce [disco.NonceLen]byte
	copy(nonce[:], msg[len(disco.Magic)+len(key.Public{}):])
	sealedBox := msg[headerLen:]
	payload, ok := box.OpenAfterPrecomputation(nil, sealedBox, &nonce, c.sharedDiscoKeyLocked(sender))
	if !ok {
		// This might have been intended for a previous
		// disco key. When we restart we get a new disco key
		// and old packets might've still been in flight (or
		// scheduled). This is particularly the case for LANs
		// or non-NATed endpoints.
		// Don't log in normal case. Pass on to wireguard, in case
		// it's actually a wireguard packet (super unlikely,
		// but).
		if debugDisco {
			c.logf("magicsock: disco: failed to open naclbox from %v (wrong rcpt?)", sender)
		}
		// TODO(bradfitz): add some counter for this that logs rarely
		return
	}

	dm, err := disco.Parse(payload)
	if debugDisco {
		c.logf("magicsock: disco: disco.Parse = %T, %v", dm, err)
	}
	if err != nil {
		// Couldn't parse it, but it was inside a correctly
		// signed box, so just ignore it, assuming it's from a
		// newer version of Tailscale that we don't
		// understand. Not even worth logging about, lest it
		// be too spammy for old clients.
		// TODO(bradfitz): add some counter for this that logs rarely
		return
	}

	switch dm := dm.(type) {
	case *disco.Ping:
		c.handlePingLocked(dm, de, src, sender, peerNode)
	case *disco.Pong:
		if de == nil {
			return
		}
		de.handlePongConnLocked(dm, src)
	case *disco.CallMeMaybe:
		if src.IP() != derpMagicIPAddr {
			// CallMeMaybe messages should only come via DERP.
			c.logf("[unexpected] CallMeMaybe packets should only come via DERP")
			return
		}
		if de != nil {
			c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints",
				c.discoShort, de.discoShort,
				de.publicKey.ShortString(), derpStr(src.String()),
				len(dm.MyNumber))
			go de.handleCallMeMaybe(dm)
		}
	}
	return
}

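// The following is an illustrative sketch, not used elsewhere in this
// package (the helper name is hypothetical): it shows how the header
// fields described in the handleDiscoMessage comment above are laid out
// and sliced apart — the magic prefix, then the sender's 32-byte disco
// public key, then the 24-byte nonce, then the sealed payload.
func splitDiscoHeader(msg []byte) (sender tailcfg.DiscoKey, nonce [disco.NonceLen]byte, sealedBox []byte, ok bool) {
	const headerLen = len(disco.Magic) + len(tailcfg.DiscoKey{}) + disco.NonceLen
	if len(msg) < headerLen || string(msg[:len(disco.Magic)]) != disco.Magic {
		return sender, nonce, nil, false
	}
	copy(sender[:], msg[len(disco.Magic):])
	copy(nonce[:], msg[len(disco.Magic)+len(tailcfg.DiscoKey{}):])
	return sender, nonce, msg[headerLen:], true
}
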
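// handlePingLocked handles an incoming disco Ping from sender at src: it
// records the route and candidate endpoint, then replies with a Pong.
// c.mu must be held.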
func (c *Conn) handlePingLocked(dm *disco.Ping, de *discoEndpoint, src netaddr.IPPort, sender tailcfg.DiscoKey, peerNode *tailcfg.Node) {
	if peerNode == nil {
		c.logf("magicsock: disco: [unexpected] ignoring ping from unknown peer Node")
		return
	}
	likelyHeartBeat := src == de.lastPingFrom && time.Since(de.lastPingTime) < 5*time.Second
	de.lastPingFrom = src
	de.lastPingTime = time.Now()
	if !likelyHeartBeat || debugDisco {
		c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x", c.discoShort, de.discoShort, peerNode.Key.ShortString(), src, dm.TxID[:6])
	}

	// Remember this route if not present.
	c.setAddrToDiscoLocked(src, sender, nil)
	de.addCandidateEndpoint(src)

	ipDst := src
	discoDest := sender
	go c.sendDiscoMessage(ipDst, peerNode.Key, discoDest, &disco.Pong{
		TxID: dm.TxID,
		Src:  src,
	}, discoVerboseLog)
}

// enqueueCallMeMaybe schedules a send of disco.CallMeMaybe to de via derpAddr
// once we know that our STUN endpoint is fresh.
//
// derpAddr is de.derpAddr at the time of send. It's assumed the peer won't be
// flipping primary DERPs in the 0-30ms it takes to confirm our STUN endpoint.
// If they do, traffic will just go over DERP for a bit longer until the next
// discovery round.
func (c *Conn) enqueueCallMeMaybe(derpAddr netaddr.IPPort, de *discoEndpoint) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.lastEndpointsTime.After(time.Now().Add(-endpointsFreshEnoughDuration)) {
		c.logf("magicsock: want call-me-maybe but endpoints stale; restunning")
		if c.onEndpointRefreshed == nil {
			c.onEndpointRefreshed = map[*discoEndpoint]func(){}
		}
		c.onEndpointRefreshed[de] = func() {
			c.logf("magicsock: STUN done; sending call-me-maybe to %v %v", de.discoShort, de.publicKey.ShortString())
			c.enqueueCallMeMaybe(derpAddr, de)
		}
		// TODO(bradfitz): make a new 'reSTUNQuickly' method
		// that passes a do-a-lite-netcheck flag down to
		// netcheck that does 1 (or 2 max) STUN queries
		// (UDP-only, not HTTPs) to find our port mapping to
		// our home DERP and maybe one other. For now we do a
		// "full" ReSTUN which may or may not be a full one
		// (depending on age) and may do HTTPS timing queries
		// (if UDP is blocked). Good enough for now.
		go c.ReSTUN("refresh-for-peering")
		return
	}

	eps := make([]netaddr.IPPort, 0, len(c.lastEndpoints))
	for _, ep := range c.lastEndpoints {
		eps = append(eps, ep.Addr)
	}
	go de.sendDiscoMessage(derpAddr, &disco.CallMeMaybe{MyNumber: eps}, discoLog)
}

// setAddrToDiscoLocked records that newk is at src.
//
// c.mu must be held.
//
// If the caller already has a discoEndpoint mutex held as well, it
// can be passed in as alreadyLocked so it won't be re-acquired during
// any lazy cleanup of the mapping.
func (c *Conn) setAddrToDiscoLocked(src netaddr.IPPort, newk tailcfg.DiscoKey, alreadyLocked *discoEndpoint) {
	oldk, ok := c.discoOfAddr[src]
	if ok && oldk == newk {
		return
	}
	if ok {
		c.logf("[v1] magicsock: disco: changing mapping of %v from %x=>%x", src, oldk.ShortString(), newk.ShortString())
	} else {
		c.logf("[v1] magicsock: disco: adding mapping of %v to %v", src, newk.ShortString())
	}
	c.discoOfAddr[src] = newk
	if !ok {
		c.cleanDiscoOfAddrLocked(alreadyLocked)
	}
}

// cleanDiscoOfAddrLocked lazily checks a few entries in c.discoOfAddr
// and deletes them if they're stale. It has no pointers in it so we
// don't go through the effort of keeping it aggressively
// pruned. Instead, we lazily clean it whenever it grows.
//
// c.mu must be held.
//
// If the caller already has a discoEndpoint mutex held as well, it
// can be passed in as alreadyLocked so it won't be re-acquired.
func (c *Conn) cleanDiscoOfAddrLocked(alreadyLocked *discoEndpoint) {
	// If it's small enough, don't worry about it.
	if len(c.discoOfAddr) < 16 {
		return
	}

	const checkEntries = 5 // per one unit of growth

	// Take advantage of Go's random map iteration to check & clean
	// a few entries.
	n := 0
	for ipp, dk := range c.discoOfAddr {
		n++
		if n > checkEntries {
			return
		}
		de, ok := c.endpointOfDisco[dk]
		if !ok {
			// This discokey isn't even known anymore. Clean.
			delete(c.discoOfAddr, ipp)
			continue
		}
		if de != alreadyLocked {
			de.mu.Lock()
		}
		if _, ok := de.endpointState[ipp]; !ok {
			// The discoEndpoint no longer knows about that endpoint.
			// It must've changed. Clean.
			delete(c.discoOfAddr, ipp)
		}
		if de != alreadyLocked {
			de.mu.Unlock()
		}
	}
}

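// sharedDiscoKeyLocked returns the precomputed NaCl box shared key for
// talking to the peer disco key k, computing and caching it on first use.
// c.mu must be held.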
func (c *Conn) sharedDiscoKeyLocked(k tailcfg.DiscoKey) *[32]byte {
	if v, ok := c.sharedDiscoKey[k]; ok {
		return v
	}
	shared := new([32]byte)
	box.Precompute(shared, key.Public(k).B32(), c.discoPrivate.B32())
	c.sharedDiscoKey[k] = shared
	return shared
}

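// SetNetworkUp sets whether the network is up. When the network comes back
// up, the home DERP connection is restarted; when it goes down, the
// portmapper is notified and all DERP connections are closed.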
func (c *Conn) SetNetworkUp(up bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.networkUp.Get() == up {
		return
	}

	c.logf("magicsock: SetNetworkUp(%v)", up)
	c.networkUp.Set(up)

	if up {
		c.startDerpHomeConnectLocked()
	} else {
		c.portMapper.NoteNetworkDown()
		c.closeAllDerpLocked("network-down")
	}
}

// SetPreferredPort sets the connection's preferred local port.
func (c *Conn) SetPreferredPort(port uint16) {
	if uint16(c.port.Get()) == port {
		return
	}
	c.port.Set(uint32(port))

	if err := c.rebind(dropCurrentPort); err != nil {
		c.logf("%v", err)
		return
	}
	c.resetEndpointStates()
}

// SetPrivateKey sets the connection's private key.
//
// This is only used to be able to prove our identity when connecting to
// DERP servers.
//
// If the private key changes, any DERP connections are torn down &
// recreated when needed.
func (c *Conn) SetPrivateKey(privateKey wgkey.Private) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	oldKey, newKey := c.privateKey, key.Private(privateKey)
	if newKey == oldKey {
		return nil
	}
	c.privateKey = newKey
	c.havePrivateKey.Set(!newKey.IsZero())

	if oldKey.IsZero() {
		c.everHadKey = true
		c.logf("magicsock: SetPrivateKey called (init)")
		if c.started {
			go c.ReSTUN("set-private-key")
		}
	} else if newKey.IsZero() {
		c.logf("magicsock: SetPrivateKey called (zeroed)")
		c.closeAllDerpLocked("zero-private-key")
		c.stopPeriodicReSTUNTimerLocked()
		c.onEndpointRefreshed = nil
	} else {
		c.logf("magicsock: SetPrivateKey called (changed)")
		c.closeAllDerpLocked("new-private-key")
	}

	// Key changed. Close existing DERP connections and reconnect to home.
	if c.myDerp != 0 && !newKey.IsZero() {
		c.logf("magicsock: private key changed, reconnecting to home derp-%d", c.myDerp)
		c.startDerpHomeConnectLocked()
	}

	if newKey.IsZero() {
		for _, de := range c.endpointOfDisco {
			de.stopAndReset()
		}
	}

	return nil
}

// UpdatePeers is called when the set of WireGuard peers changes. It
// then removes any state for old peers.
//
// The caller passes ownership of newPeers map to UpdatePeers.
func (c *Conn) UpdatePeers(newPeers map[key.Public]struct{}) {
	c.mu.Lock()
	defer c.mu.Unlock()

	oldPeers := c.peerSet
	c.peerSet = newPeers

	// Clean up any key.Public-keyed maps for peers that no longer
	// exist.
	for peer := range oldPeers {
		if _, ok := newPeers[peer]; !ok {
			delete(c.addrsByKey, peer)
			delete(c.derpRoute, peer)
			delete(c.peerLastDerp, peer)
		}
	}

	if len(oldPeers) == 0 && len(newPeers) > 0 {
		go c.ReSTUN("non-zero-peers")
	}
}

// SetDERPMap controls which (if any) DERP servers are used.
// A nil value means to disable DERP; it's disabled by default.
func (c *Conn) SetDERPMap(dm *tailcfg.DERPMap) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if reflect.DeepEqual(dm, c.derpMap) {
		return
	}

	c.derpMap = dm
	if dm == nil {
		c.closeAllDerpLocked("derp-disabled")
		return
	}

	if c.started {
		go c.ReSTUN("derp-map-update")
	}
}

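// nodesEqual reports whether x and y contain the same nodes, in the same
// order.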
func nodesEqual(x, y []*tailcfg.Node) bool {
	if len(x) != len(y) {
		return false
	}
	for i := range x {
		if !x[i].Equal(y[i]) {
			return false
		}
	}
	return true
}

// SetNetworkMap is called when the control client gets a new network
// map from the control server. It must always be non-nil.
//
// It should not use the DERPMap field of NetworkMap; that's
// conditionally sent to SetDERPMap instead.
func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.netMap != nil && nodesEqual(c.netMap.Peers, nm.Peers) {
		return
	}

	numDisco := 0
	for _, n := range nm.Peers {
		if n.DiscoKey.IsZero() {
			continue
		}
		numDisco++
		if ep, ok := c.endpointOfDisco[n.DiscoKey]; ok && ep.publicKey == n.Key {
			ep.updateFromNode(n)
		} else if ok {
			c.logf("magicsock: disco key %v changed from node key %v to %v", n.DiscoKey, ep.publicKey.ShortString(), n.Key.ShortString())
			ep.stopAndReset()
			delete(c.endpointOfDisco, n.DiscoKey)
		}
	}

	c.logf("[v1] magicsock: got updated network map; %d peers (%d with discokey)", len(nm.Peers), numDisco)
	c.netMap = nm

	// Build and/or update node<->disco maps, only reallocating if
	// the set of discokeys changed.
	for pass := 1; pass <= 2; pass++ {
		if c.nodeOfDisco == nil || pass == 2 {
			c.nodeOfDisco = map[tailcfg.DiscoKey]*tailcfg.Node{}
			c.discoOfNode = map[tailcfg.NodeKey]tailcfg.DiscoKey{}
		}
		for _, n := range nm.Peers {
			if !n.DiscoKey.IsZero() {
				c.nodeOfDisco[n.DiscoKey] = n
				if old, ok := c.discoOfNode[n.Key]; ok && old != n.DiscoKey {
					c.logf("magicsock: node %s changed discovery key from %x to %x", n.Key.ShortString(), old[:8], n.DiscoKey[:8])
				}
				c.discoOfNode[n.Key] = n.DiscoKey
			}
		}
		if len(c.nodeOfDisco) == numDisco && len(c.discoOfNode) == numDisco {
			break
		}
	}

	// Clean c.endpointOfDisco for discovery keys that are no longer present.
	for dk, de := range c.endpointOfDisco {
		if _, ok := c.nodeOfDisco[dk]; !ok {
			de.stopAndReset()
			delete(c.endpointOfDisco, dk)
			delete(c.sharedDiscoKey, dk)
		}
	}
}

func (c *Conn) wantDerpLocked() bool { return c.derpMap != nil }

// c.mu must be held.
func (c *Conn) closeAllDerpLocked(why string) {
	if len(c.activeDerp) == 0 {
		return // without the useless log statement
	}
	for i := range c.activeDerp {
		c.closeDerpLocked(i, why)
	}
	c.logActiveDerpLocked()
}

// c.mu must be held.
// It is the responsibility of the caller to call logActiveDerpLocked after any set of closes.
func (c *Conn) closeDerpLocked(node int, why string) {
	if ad, ok := c.activeDerp[node]; ok {
		c.logf("magicsock: closing connection to derp-%v (%v), age %v", node, why, time.Since(ad.createTime).Round(time.Second))
		go ad.c.Close()
		ad.cancel()
		delete(c.activeDerp, node)
	}
}

// c.mu must be held.
func (c *Conn) logActiveDerpLocked() {
	now := time.Now()
	c.logf("magicsock: %v active derp conns%s", len(c.activeDerp), logger.ArgWriter(func(buf *bufio.Writer) {
		if len(c.activeDerp) == 0 {
			return
		}
		buf.WriteString(":")
		c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) {
			fmt.Fprintf(buf, " derp-%d=cr%v,wr%v", node, simpleDur(now.Sub(ad.createTime)), simpleDur(now.Sub(*ad.lastWrite)))
		})
	}))
}

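// logEndpointChange logs the updated set of endpoints and how each one was
// discovered.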
func (c *Conn) logEndpointChange(endpoints []tailcfg.Endpoint) {
	c.logf("magicsock: endpoints changed: %s", logger.ArgWriter(func(buf *bufio.Writer) {
		for i, ep := range endpoints {
			if i > 0 {
				buf.WriteString(", ")
			}
			fmt.Fprintf(buf, "%s (%s)", ep.Addr, ep.Type)
		}
	}))
}

// c.mu must be held.
func (c *Conn) foreachActiveDerpSortedLocked(fn func(regionID int, ad activeDerp)) {
	if len(c.activeDerp) < 2 {
		for id, ad := range c.activeDerp {
			fn(id, ad)
		}
		return
	}
	ids := make([]int, 0, len(c.activeDerp))
	for id := range c.activeDerp {
		ids = append(ids, id)
	}
	sort.Ints(ids)
	for _, id := range ids {
		fn(id, c.activeDerp[id])
	}
}

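// cleanStaleDerp closes DERP connections, other than the home one, that
// have gone unwritten for longer than derpInactiveCleanupTime, and re-arms
// the cleanup timer if any non-home connections remain open.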
func (c *Conn) cleanStaleDerp() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return
	}
	c.derpCleanupTimerArmed = false

	tooOld := time.Now().Add(-derpInactiveCleanupTime)
	dirty := false
	someNonHomeOpen := false
	for i, ad := range c.activeDerp {
		if i == c.myDerp {
			continue
		}
		if ad.lastWrite.Before(tooOld) {
			c.closeDerpLocked(i, "idle")
			dirty = true
		} else {
			someNonHomeOpen = true
		}
	}
	if dirty {
		c.logActiveDerpLocked()
	}
	if someNonHomeOpen {
		c.scheduleCleanStaleDerpLocked()
	}
}

func (c *Conn) scheduleCleanStaleDerpLocked() {
	if c.derpCleanupTimerArmed {
		// Already going to fire soon. Let the existing one
		// fire lest it get infinitely delayed by repeated
		// calls to scheduleCleanStaleDerpLocked.
		return
	}
	c.derpCleanupTimerArmed = true
	if c.derpCleanupTimer != nil {
		c.derpCleanupTimer.Reset(derpCleanStaleInterval)
	} else {
		c.derpCleanupTimer = time.AfterFunc(derpCleanStaleInterval, c.cleanStaleDerp)
	}
}

// DERPs reports the number of active DERP connections.
func (c *Conn) DERPs() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	return len(c.activeDerp)
}

// Bind returns the wireguard-go conn.Bind for c.
func (c *Conn) Bind() conn.Bind {
	return c.bind
}

// connBind is a wireguard-go conn.Bind for a Conn.
// It bridges the behavior of wireguard-go and a Conn.
// wireguard-go calls Close then Open on device.Up.
// That won't work well for a Conn, which is only closed on shutdown.
// The subsequent Close is a real close.
type connBind struct {
	*Conn
	mu     sync.Mutex
	closed bool
}

// Open is called by WireGuard to create a UDP binding.
// The ignoredPort comes from wireguard-go, via the wgcfg config.
// We ignore that port value here, since we have the local port available easily.
func (c *connBind) Open(ignoredPort uint16) ([]conn.ReceiveFunc, uint16, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.closed {
		return nil, 0, errors.New("magicsock: connBind already open")
	}
	c.closed = false
	fns := []conn.ReceiveFunc{c.receiveIPv4, c.receiveIPv6, c.receiveDERP}
	// TODO: Combine receiveIPv4 and receiveIPv6 and receiveIP into a single
	// closure that closes over a *RebindingUDPConn?
	return fns, c.LocalPort(), nil
}

// SetMark is used by wireguard-go to set a mark bit for packets to avoid routing loops.
// We handle that ourselves elsewhere.
func (c *connBind) SetMark(value uint32) error {
	return nil
}

// Close closes the connBind, unless it is already closed.
func (c *connBind) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil
	}
	c.closed = true
	// Unblock all outstanding receives.
	c.pconn4.Close()
	c.pconn6.Close()
	// Send an empty read result to unblock receiveDERP,
	// which will then check connBind.Closed.
	c.derpRecvCh <- derpReadResult{}
	return nil
}

// Closed reports whether c is closed.
func (c *connBind) Closed() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.closed
}

// Close closes the connection.
//
// Only the first close does anything. Any later closes return nil.
func (c *Conn) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil
	}
	if c.derpCleanupTimerArmed {
		c.derpCleanupTimer.Stop()
	}
	c.stopPeriodicReSTUNTimerLocked()
	c.portMapper.Close()

	for _, ep := range c.endpointOfDisco {
		ep.stopAndReset()
	}

	c.closed = true
	c.connCtxCancel()
	c.closeAllDerpLocked("conn-close")
	// Ignore errors from c.pconnN.Close.
	// They will frequently have been closed already by a call to connBind.Close.
	if c.pconn6 != nil {
		c.pconn6.Close()
	}
	c.pconn4.Close()

	// Wait on goroutines updating right at the end, once everything is
	// already closed. We want everything else in the Conn to be
	// consistently in the closed state before we release mu to wait
	// on the endpoint updater & derphttp.Connect.
	for c.goroutinesRunningLocked() {
		c.muCond.Wait()
	}
	return nil
}

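// goroutinesRunningLocked reports whether any of the Conn's background
// goroutines (the endpoint updater or the initial DERP connect) may still
// be running. c.mu must be held.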
func (c *Conn) goroutinesRunningLocked() bool {
	if c.endpointsUpdateActive {
		return true
	}
	// The goroutine running dc.Connect in derpWriteChanOfAddr may linger
	// and appear to leak, as observed in https://github.com/tailscale/tailscale/issues/554.
	// This is despite the underlying context being cancelled by connCtxCancel above.
	// To avoid this condition, we must wait on derpStarted here
	// to ensure that this goroutine has exited by the time Close returns.
	// We only do this if derpWriteChanOfAddr has executed at least once:
	// on the first run, it sets firstDerp := true and spawns the aforementioned goroutine.
	// To detect this, we check activeDerp, which is initialized to non-nil on the first run.
	if c.activeDerp != nil {
		select {
		case <-c.derpStarted:
			break
		default:
			return true
		}
	}
	return false
}

func maxIdleBeforeSTUNShutdown() time.Duration {
	if debugReSTUNStopOnIdle {
		return 45 * time.Second
	}
	return sessionActiveTimeout
}

func (c *Conn) shouldDoPeriodicReSTUNLocked() bool {
	if c.networkDown() {
		return false
	}
	if len(c.peerSet) == 0 || c.privateKey.IsZero() {
		// If no peers, not worth doing.
		// Also don't if there's no key (not running).
		return false
	}
	if f := c.idleFunc; f != nil {
		idleFor := f()
		if debugReSTUNStopOnIdle {
			c.logf("magicsock: periodicReSTUN: idle for %v", idleFor.Round(time.Second))
		}
		if idleFor > maxIdleBeforeSTUNShutdown() {
			if c.netMap != nil && c.netMap.Debug != nil && c.netMap.Debug.ForceBackgroundSTUN {
				// Overridden by control.
				return true
			}
			return false
		}
	}
	return true
}

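// onPortMapChanged is the portmapper's change callback; it triggers a new
// round of endpoint discovery so the new mapping is picked up.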
func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") }

// ReSTUN triggers an address discovery.
// The provided why string is for debug logging only.
func (c *Conn) ReSTUN(why string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.started {
		panic("call to ReSTUN before Start")
	}
	if c.closed {
		// raced with a shutdown.
		return
	}

	// If the user stopped the app, stop doing work. (When the
	// user stops Tailscale via the GUI apps, ipn/local.go
	// reconfigures the engine with a zero private key.)
	//
	// This used to just check c.privateKey.IsZero, but that broke
	// some end-to-end tests that didn't ever set a private
	// key somehow. So for now, only stop doing work if we ever
	// had a key, which helps real users, but appeases tests for
	// now. TODO: rewrite those tests to be less brittle or more
	// realistic.
	if c.privateKey.IsZero() && c.everHadKey {
		c.logf("magicsock: ReSTUN(%q) ignored; stopped, no private key", why)
		return
	}

	if c.endpointsUpdateActive {
		if c.wantEndpointsUpdate != why {
			c.logf("[v1] magicsock: ReSTUN: endpoint update active, need another later (%q)", why)
			c.wantEndpointsUpdate = why
		}
	} else {
		c.endpointsUpdateActive = true
		go c.updateEndpoints(why)
	}
}

func (c *Conn) initialBind() error {
	if err := c.bindSocket(&c.pconn4, "udp4", keepCurrentPort); err != nil {
		return fmt.Errorf("magicsock: initialBind IPv4 failed: %w", err)
	}
	c.portMapper.SetLocalPort(c.LocalPort())
	if err := c.bindSocket(&c.pconn6, "udp6", keepCurrentPort); err != nil {
		c.logf("magicsock: ignoring IPv6 bind failure: %v", err)
	}
	return nil
}

// listenPacket opens a packet listener.
// The network must be "udp4" or "udp6".
// Host is the (local) IP address to listen on; use the zero IP to leave unspecified.
func (c *Conn) listenPacket(network string, host netaddr.IP, port uint16) (net.PacketConn, error) {
	ctx := context.Background() // unused without DNS name to resolve
	// Translate host to package net: "" for the zero value, the IP address string otherwise.
	var s string
	if !host.IsZero() {
		s = host.String()
	}
	addr := net.JoinHostPort(s, fmt.Sprint(port))
	if c.packetListener != nil {
		return c.packetListener.ListenPacket(ctx, network, addr)
	}
	return netns.Listener().ListenPacket(ctx, network, addr)
}

// bindSocket initializes rucPtr if necessary and binds a UDP socket to it.
// Network indicates the UDP socket type; it must be "udp4" or "udp6".
// If rucPtr had an existing UDP socket bound, it closes that socket.
// The caller is responsible for informing the portMapper of any changes.
// If curPortFate is set to dropCurrentPort, no attempt is made to reuse
// the current port.
func (c *Conn) bindSocket(rucPtr **RebindingUDPConn, network string, curPortFate currentPortFate) error {
	var host netaddr.IP
	if inTest() && !c.simulatedNetwork {
		switch network {
		case "udp4":
			host = netaddr.MustParseIP("127.0.0.1")
		case "udp6":
			host = netaddr.MustParseIP("::1")
		default:
			panic("unrecognized network in bindSocket: " + network)
		}
	}

	if *rucPtr == nil {
		*rucPtr = new(RebindingUDPConn)
	}
	ruc := *rucPtr

	// Hold the ruc lock the entire time, so that the close+bind is atomic
	// from the perspective of ruc receive functions.
	ruc.mu.Lock()
	defer ruc.mu.Unlock()

	if debugAlwaysDERP {
		c.logf("disabled %v per TS_DEBUG_ALWAYS_USE_DERP", network)
		ruc.pconn = newBlockForeverConn()
		return nil
	}

	// Build a list of preferred ports.
	// Best is the port that the user requested.
	// Second best is the port that is currently in use.
	// If those fail, fall back to 0.
	var ports []uint16
	if port := uint16(c.port.Get()); port != 0 {
		ports = append(ports, port)
	}
	if ruc.pconn != nil && curPortFate == keepCurrentPort {
		curPort := uint16(ruc.localAddrLocked().Port)
		ports = append(ports, curPort)
	}
	ports = append(ports, 0)
	// Remove duplicates. (All duplicates are consecutive.)
	uniq.ModifySlice(&ports, func(i, j int) bool { return ports[i] == ports[j] })

	var pconn net.PacketConn
	for _, port := range ports {
		// Close the existing conn, in case it is sitting on the port we want.
		err := ruc.closeLocked()
		if err != nil && !errors.Is(err, net.ErrClosed) && !errors.Is(err, errNilPConn) {
			c.logf("magicsock: bindSocket %v close failed: %v", network, err)
		}
		// Open a new one with the desired port.
		pconn, err = c.listenPacket(network, host, port)
		if err != nil {
			c.logf("magicsock: unable to bind %v port %d: %v", network, port, err)
			continue
		}
		// Success.
		ruc.pconn = pconn
		if network == "udp4" {
			health.SetUDP4Unbound(false)
		}
		return nil
	}

	// Failed to bind, including on port 0 (!).
	// Set pconn to a dummy conn whose reads block until closed.
	// This keeps the receive funcs alive for a future in which
	// we get a link change and we can try binding again.
	ruc.pconn = newBlockForeverConn()
	if network == "udp4" {
		health.SetUDP4Unbound(true)
	}
	return fmt.Errorf("failed to bind any ports (tried %v)", ports)
}

type currentPortFate uint8

const (
	keepCurrentPort = currentPortFate(0)
	dropCurrentPort = currentPortFate(1)
)

// rebind closes and re-binds the UDP sockets.
// We consider it successful if we manage to bind the IPv4 socket.
func (c *Conn) rebind(curPortFate currentPortFate) error {
	if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil {
		return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err)
	}
	c.portMapper.SetLocalPort(c.LocalPort())
	if err := c.bindSocket(&c.pconn6, "udp6", curPortFate); err != nil {
		c.logf("magicsock: Rebind ignoring IPv6 bind failure: %v", err)
	}
	return nil
}

// Rebind closes and re-binds the UDP sockets and resets the DERP connection.
// It should be followed by a call to ReSTUN.
func (c *Conn) Rebind() {
	if err := c.rebind(keepCurrentPort); err != nil {
		c.logf("%v", err)
		return
	}

	c.mu.Lock()
	c.closeAllDerpLocked("rebind")
	if !c.privateKey.IsZero() {
		c.startDerpHomeConnectLocked()
	}
	c.mu.Unlock()

	c.resetEndpointStates()
}

// resetEndpointStates resets the preferred address for all peers and
// re-enables spraying.
// This is called when connectivity changes enough that we no longer
// trust the old routes.
func (c *Conn) resetEndpointStates() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, de := range c.endpointOfDisco {
		de.noteConnectivityChange()
	}
	c.resetAddrSetStatesLocked()
}

// packIPPort packs an IPPort into the form wanted by WireGuard.
func packIPPort(ua netaddr.IPPort) []byte {
	ip := ua.IP().Unmap()
	a := ip.As16()
	ipb := a[:]
	if ip.Is4() {
		ipb = ipb[12:]
	}
	b := make([]byte, 0, len(ipb)+2)
	b = append(b, ipb...)
	b = append(b, byte(ua.Port()))
	b = append(b, byte(ua.Port()>>8))
	return b
}

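// For example, with the layout above an IPv4 endpoint like 1.2.3.4:80
// packs to the 6 bytes {0x01, 0x02, 0x03, 0x04, 0x50, 0x00}: the 4-byte
// address followed by the port in little-endian byte order (80 == 0x0050).
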
// ParseEndpoint is called by WireGuard to connect to an endpoint.
// endpointStr is a json-serialized wgcfg.Endpoints struct.
// If those Endpoints contain an active discovery key, ParseEndpoint returns a discoEndpoint.
// Otherwise it returns a legacy endpoint.
func (c *Conn) ParseEndpoint(endpointStr string) (conn.Endpoint, error) {
	var endpoints wgcfg.Endpoints
	err := json.Unmarshal([]byte(endpointStr), &endpoints)
	if err != nil {
		return nil, fmt.Errorf("magicsock: ParseEndpoint: json.Unmarshal failed on %q: %w", endpointStr, err)
	}
	pk := key.Public(endpoints.PublicKey)
	discoKey := endpoints.DiscoKey
	c.logf("magicsock: ParseEndpoint: key=%s: disco=%s ipps=%s", pk.ShortString(), discoKey.ShortString(), derpStr(endpoints.IPPorts.String()))

	c.mu.Lock()
	defer c.mu.Unlock()
	if discoKey.IsZero() {
		return c.createLegacyEndpointLocked(pk, endpoints.IPPorts, endpointStr)
	}
	de := &discoEndpoint{
		c:             c,
		publicKey:     tailcfg.NodeKey(pk),        // peer public key (for WireGuard + DERP)
		discoKey:      tailcfg.DiscoKey(discoKey), // for discovery messages
		discoShort:    tailcfg.DiscoKey(discoKey).ShortString(),
		wgEndpoint:    endpointStr,
		sentPing:      map[stun.TxID]sentPing{},
		endpointState: map[netaddr.IPPort]*endpointState{},
	}
	de.initFakeUDPAddr()
	de.updateFromNode(c.nodeOfDisco[de.discoKey])
	c.endpointOfDisco[de.discoKey] = de
	return de, nil
}

// RebindingUDPConn is a UDP socket that can be re-bound.
// Unix has no notion of re-binding a socket, so we swap it out for a new one.
type RebindingUDPConn struct {
	mu    sync.Mutex
	pconn net.PacketConn
}

// currentConn returns c's current pconn.
func (c *RebindingUDPConn) currentConn() net.PacketConn {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.pconn
}

// ReadFrom reads a packet from c into b.
// It returns the number of bytes copied and the source address.
func (c *RebindingUDPConn) ReadFrom(b []byte) (int, net.Addr, error) {
	for {
		pconn := c.currentConn()
		n, addr, err := pconn.ReadFrom(b)
		if err != nil && pconn != c.currentConn() {
			continue
		}
		return n, addr, err
	}
}

// ReadFromNetaddr reads a packet from c into b.
// It returns the number of bytes copied and the return address.
// It is identical to c.ReadFrom, except that it returns a netaddr.IPPort instead of a net.Addr.
// ReadFromNetaddr is designed to work with specific underlying connection types.
// If c's underlying connection returns a non-*net.UDPAddr return address, ReadFromNetaddr will return an error.
// ReadFromNetaddr exists because it removes an allocation per read,
// when c's underlying connection is a net.UDPConn.
func (c *RebindingUDPConn) ReadFromNetaddr(b []byte) (n int, ipp netaddr.IPPort, err error) {
	for {
		pconn := c.currentConn()

		// Optimization: Treat *net.UDPConn specially.
		// ReadFromUDP gets partially inlined, avoiding allocating a *net.UDPAddr,
		// as long as pAddr itself doesn't escape.
		// The non-*net.UDPConn case works, but it allocates.
		var pAddr *net.UDPAddr
		if udpConn, ok := pconn.(*net.UDPConn); ok {
			n, pAddr, err = udpConn.ReadFromUDP(b)
		} else {
			var addr net.Addr
			n, addr, err = pconn.ReadFrom(b)
			if addr != nil {
				pAddr, ok = addr.(*net.UDPAddr)
				if !ok {
					return 0, netaddr.IPPort{}, fmt.Errorf("RebindingUDPConn.ReadFromNetaddr: underlying connection returned address of type %T, want *net.UDPAddr", addr)
				}
			}
		}

		if err != nil {
			if pconn != c.currentConn() {
				continue
			}
		} else {
			// Convert pAddr to a netaddr.IPPort.
			// This prevents pAddr from escaping.
			var ok bool
			ipp, ok = netaddr.FromStdAddr(pAddr.IP, pAddr.Port, pAddr.Zone)
			if !ok {
				return 0, netaddr.IPPort{}, errors.New("netaddr.FromStdAddr failed")
			}
		}
		return n, ipp, err
	}
}

func (c *RebindingUDPConn) LocalAddr() *net.UDPAddr {
|
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
2021-03-08 23:48:49 +00:00
|
|
|
return c.localAddrLocked()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *RebindingUDPConn) localAddrLocked() *net.UDPAddr {
|
2020-02-05 22:16:58 +00:00
|
|
|
return c.pconn.LocalAddr().(*net.UDPAddr)
|
|
|
|
}
|
|
|
|
|
2021-04-27 21:40:29 +00:00
|
|
|
// errNilPConn is returned by RebindingUDPConn.Close when there is no current pconn.
|
|
|
|
// It is for internal use only and should not be returned to users.
|
|
|
|
var errNilPConn = errors.New("nil pconn")
|
|
|
|
|
2020-02-05 22:16:58 +00:00
|
|
|
func (c *RebindingUDPConn) Close() error {
|
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
2021-04-27 21:40:29 +00:00
|
|
|
return c.closeLocked()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *RebindingUDPConn) closeLocked() error {
|
|
|
|
if c.pconn == nil {
|
|
|
|
return errNilPConn
|
|
|
|
}
|
2020-02-05 22:16:58 +00:00
|
|
|
return c.pconn.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *RebindingUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) {
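// If a write fails and the underlying conn has been swapped out by a
// concurrent rebind, retry the write on the new conn (same pattern as
// ReadFrom above).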
|
|
|
|
for {
|
|
|
|
c.mu.Lock()
|
|
|
|
pconn := c.pconn
|
|
|
|
c.mu.Unlock()
|
|
|
|
|
|
|
|
n, err := pconn.WriteTo(b, addr)
|
|
|
|
if err != nil {
|
|
|
|
c.mu.Lock()
|
|
|
|
pconn2 := c.pconn
|
|
|
|
c.mu.Unlock()
|
|
|
|
|
|
|
|
if pconn != pconn2 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
}
|
2020-03-23 21:12:23 +00:00
|
|
|
|
2021-04-27 21:40:29 +00:00
|
|
|
func newBlockForeverConn() *blockForeverConn {
|
|
|
|
c := new(blockForeverConn)
|
|
|
|
c.cond = sync.NewCond(&c.mu)
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
|
|
|
// blockForeverConn is a net.PacketConn whose reads block until it is closed.
|
|
|
|
type blockForeverConn struct {
|
|
|
|
mu sync.Mutex
|
|
|
|
cond *sync.Cond
|
|
|
|
closed bool
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *blockForeverConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
|
|
|
|
c.mu.Lock()
|
|
|
|
for !c.closed {
|
|
|
|
c.cond.Wait()
|
|
|
|
}
|
|
|
|
c.mu.Unlock()
|
|
|
|
return 0, nil, net.ErrClosed
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *blockForeverConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
|
|
|
|
// Silently drop writes.
|
|
|
|
return len(p), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *blockForeverConn) LocalAddr() net.Addr {
|
|
|
|
// Return a *net.UDPAddr because lots of code assumes that it will.
|
|
|
|
return new(net.UDPAddr)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *blockForeverConn) Close() error {
|
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
|
|
|
if c.closed {
|
|
|
|
return net.ErrClosed
|
|
|
|
}
|
|
|
|
c.closed = true
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *blockForeverConn) SetDeadline(t time.Time) error { return errors.New("unimplemented") }
|
|
|
|
func (c *blockForeverConn) SetReadDeadline(t time.Time) error { return errors.New("unimplemented") }
|
|
|
|
func (c *blockForeverConn) SetWriteDeadline(t time.Time) error { return errors.New("unimplemented") }
|
|
|
|
|
2020-03-23 21:12:23 +00:00
|
|
|
// simpleDur rounds d such that it stringifies to something short.
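// For example, 123.456789ms rounds to 123ms, 12.345s rounds to 12s,
// and 3m20s rounds to 3m0s.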
|
|
|
|
func simpleDur(d time.Duration) time.Duration {
|
|
|
|
if d < time.Second {
|
|
|
|
return d.Round(time.Millisecond)
|
|
|
|
}
|
|
|
|
if d < time.Minute {
|
|
|
|
return d.Round(time.Second)
|
|
|
|
}
|
|
|
|
return d.Round(time.Minute)
|
|
|
|
}
|
2020-03-24 15:09:30 +00:00
|
|
|
|
|
|
|
func peerShort(k key.Public) string {
|
2020-12-30 01:22:56 +00:00
|
|
|
k2 := wgkey.Key(k)
|
2020-03-24 15:09:30 +00:00
|
|
|
return k2.ShortString()
|
|
|
|
}
|
2020-03-24 17:56:22 +00:00
|
|
|
|
2021-02-11 20:39:56 +00:00
|
|
|
func sbPrintAddr(sb *strings.Builder, a netaddr.IPPort) {
|
2021-05-15 01:07:28 +00:00
|
|
|
is6 := a.IP().Is6()
|
2020-03-24 20:40:43 +00:00
|
|
|
if is6 {
|
|
|
|
sb.WriteByte('[')
|
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
fmt.Fprintf(sb, "%s", a.IP())
|
2020-03-24 20:40:43 +00:00
|
|
|
if is6 {
|
|
|
|
sb.WriteByte(']')
|
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
fmt.Fprintf(sb, ":%d", a.Port())
|
2020-03-24 20:40:43 +00:00
|
|
|
}
|
2020-03-26 05:57:46 +00:00
|
|
|
|
2020-07-03 20:44:22 +00:00
|
|
|
func (c *Conn) derpRegionCodeOfAddrLocked(ipPort string) string {
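// ipPort here is expected to be a DERP fake address of the form
// "127.3.3.40:<regionID>" (see derpMagicIPAddr and derpStr), so the
// port carries the DERP region ID; anything else won't match a region
// and yields "".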
|
|
|
|
_, portStr, err := net.SplitHostPort(ipPort)
|
|
|
|
if err != nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
regionID, err := strconv.Atoi(portStr)
|
|
|
|
if err != nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return c.derpRegionCodeOfIDLocked(regionID)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Conn) derpRegionCodeOfIDLocked(regionID int) string {
|
|
|
|
if c.derpMap == nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
if r, ok := c.derpMap.Regions[regionID]; ok {
|
|
|
|
return r.RegionCode
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2020-03-26 05:57:46 +00:00
|
|
|
func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) {
|
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
|
|
|
|
2021-04-14 14:20:27 +00:00
|
|
|
var tailAddr4 string
|
|
|
|
var tailscaleIPs []netaddr.IP
|
2020-07-27 20:25:25 +00:00
|
|
|
if c.netMap != nil {
|
2021-04-14 14:20:27 +00:00
|
|
|
tailscaleIPs = make([]netaddr.IP, 0, len(c.netMap.Addresses))
|
2020-07-27 20:25:25 +00:00
|
|
|
for _, addr := range c.netMap.Addresses {
|
2020-12-24 20:33:55 +00:00
|
|
|
if !addr.IsSingleIP() {
|
2020-07-27 20:25:25 +00:00
|
|
|
continue
|
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
sb.AddTailscaleIP(addr.IP())
|
2021-04-14 14:20:27 +00:00
|
|
|
// TailAddr previously only allowed for a
|
|
|
|
// single Tailscale IP. For compatibility, for
|
|
|
|
// a couple releases starting with 1.8, keep
|
|
|
|
// that field pulled out separately.
|
2021-05-15 01:07:28 +00:00
|
|
|
if addr.IP().Is4() {
|
|
|
|
tailAddr4 = addr.IP().String()
|
2020-12-16 02:45:13 +00:00
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
tailscaleIPs = append(tailscaleIPs, addr.IP())
|
2020-07-27 20:25:25 +00:00
|
|
|
}
|
|
|
|
}
|
2021-03-25 22:38:40 +00:00
|
|
|
|
|
|
|
sb.MutateSelfStatus(func(ss *ipnstate.PeerStatus) {
|
|
|
|
ss.PublicKey = c.privateKey.Public()
|
2021-04-12 20:24:29 +00:00
|
|
|
ss.Addrs = make([]string, 0, len(c.lastEndpoints))
|
|
|
|
for _, ep := range c.lastEndpoints {
|
|
|
|
ss.Addrs = append(ss.Addrs, ep.Addr.String())
|
|
|
|
}
|
2021-03-25 22:38:40 +00:00
|
|
|
ss.OS = version.OS()
|
|
|
|
if c.netMap != nil {
|
|
|
|
ss.HostName = c.netMap.Hostinfo.Hostname
|
|
|
|
ss.DNSName = c.netMap.Name
|
|
|
|
ss.UserID = c.netMap.User
|
2021-04-16 17:57:46 +00:00
|
|
|
if c.netMap.SelfNode != nil {
|
|
|
|
if c := c.netMap.SelfNode.Capabilities; len(c) > 0 {
|
|
|
|
ss.Capabilities = append([]string(nil), c...)
|
|
|
|
}
|
|
|
|
}
|
2021-03-25 22:38:40 +00:00
|
|
|
} else {
|
|
|
|
ss.HostName, _ = os.Hostname()
|
|
|
|
}
|
|
|
|
if c.derpMap != nil {
|
|
|
|
derpRegion, ok := c.derpMap.Regions[c.myDerp]
|
|
|
|
if ok {
|
|
|
|
ss.Relay = derpRegion.RegionCode
|
|
|
|
}
|
|
|
|
}
|
2021-04-14 14:20:27 +00:00
|
|
|
ss.TailscaleIPs = tailscaleIPs
|
|
|
|
ss.TailAddrDeprecated = tailAddr4
|
2021-03-25 22:38:40 +00:00
|
|
|
})
|
2020-07-27 20:25:25 +00:00
|
|
|
|
2020-07-23 22:15:28 +00:00
|
|
|
for dk, n := range c.nodeOfDisco {
|
2020-07-03 18:06:33 +00:00
|
|
|
ps := &ipnstate.PeerStatus{InMagicSock: true}
|
2020-07-23 22:15:28 +00:00
|
|
|
ps.Addrs = append(ps.Addrs, n.Endpoints...)
|
|
|
|
ps.Relay = c.derpRegionCodeOfAddrLocked(n.DERP)
|
|
|
|
if de, ok := c.endpointOfDisco[dk]; ok {
|
|
|
|
de.populatePeerStatus(ps)
|
2020-07-03 18:06:33 +00:00
|
|
|
}
|
2020-07-23 22:15:28 +00:00
|
|
|
sb.AddPeer(key.Public(n.Key), ps)
|
2020-07-03 18:06:33 +00:00
|
|
|
}
|
|
|
|
// Old-style (pre-disco) peers:
|
2020-03-26 05:57:46 +00:00
|
|
|
for k, as := range c.addrsByKey {
|
|
|
|
ps := &ipnstate.PeerStatus{
|
|
|
|
InMagicSock: true,
|
2020-07-03 20:44:22 +00:00
|
|
|
Relay: c.derpRegionCodeOfIDLocked(as.derpID()),
|
2020-03-26 05:57:46 +00:00
|
|
|
}
|
2020-07-03 20:44:22 +00:00
|
|
|
as.populatePeerStatus(ps)
|
2020-03-26 05:57:46 +00:00
|
|
|
sb.AddPeer(k, ps)
|
|
|
|
}
|
|
|
|
|
|
|
|
c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) {
|
|
|
|
// TODO(bradfitz): add to ipnstate.StatusBuilder
|
|
|
|
//f("<li><b>derp-%v</b>: cr%v,wr%v</li>", node, simpleDur(now.Sub(ad.createTime)), simpleDur(now.Sub(*ad.lastWrite)))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-02-11 20:39:56 +00:00
|
|
|
func ippDebugString(ua netaddr.IPPort) string {
|
2021-05-15 01:07:28 +00:00
|
|
|
if ua.IP() == derpMagicIPAddr {
|
|
|
|
return fmt.Sprintf("derp-%d", ua.Port())
|
2020-03-26 05:57:46 +00:00
|
|
|
}
|
|
|
|
return ua.String()
|
|
|
|
}
|
2020-06-28 18:53:37 +00:00
|
|
|
|
|
|
|
// discoEndpoint is a wireguard/conn.Endpoint for new-style peers that
|
|
|
|
// advertise a DiscoKey and participate in active discovery.
|
|
|
|
type discoEndpoint struct {
|
2020-08-06 21:57:03 +00:00
|
|
|
// atomically accessed; declared first for alignment reasons
|
2021-01-18 23:27:44 +00:00
|
|
|
lastRecvUnixAtomic int64
|
|
|
|
numStopAndResetAtomic int64
|
2020-08-06 21:57:03 +00:00
|
|
|
|
2020-07-02 05:15:41 +00:00
|
|
|
// These fields are initialized once and never modified.
|
2021-04-30 23:43:26 +00:00
|
|
|
c *Conn
|
|
|
|
publicKey tailcfg.NodeKey // peer public key (for WireGuard + DERP)
|
|
|
|
discoKey tailcfg.DiscoKey // for discovery messages
|
|
|
|
discoShort string // ShortString of discoKey
|
|
|
|
fakeWGAddr netaddr.IPPort // the UDP address we tell wireguard-go we're using
|
2021-04-30 23:45:36 +00:00
|
|
|
wgEndpoint string // string from ParseEndpoint, holds a JSON-serialized wgcfg.Endpoints
|
2020-06-28 18:53:37 +00:00
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
// Owned by Conn.mu:
|
|
|
|
lastPingFrom netaddr.IPPort
|
|
|
|
lastPingTime time.Time
|
|
|
|
|
2020-06-30 22:32:19 +00:00
|
|
|
// mu protects all following fields.
|
|
|
|
mu sync.Mutex // Lock ordering: Conn.mu, then discoEndpoint.mu
|
|
|
|
|
2020-07-03 19:43:39 +00:00
|
|
|
heartBeatTimer *time.Timer // nil when idle
|
|
|
|
lastSend time.Time // last time outgoing packets were sent to this peer (from wireguard-go)
|
|
|
|
lastFullPing time.Time // last time we pinged all endpoints
|
|
|
|
derpAddr netaddr.IPPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients)
|
2020-06-30 22:32:19 +00:00
|
|
|
|
2021-03-23 17:07:34 +00:00
|
|
|
bestAddr addrLatency // best non-DERP path; zero if none
|
|
|
|
bestAddrAt time.Time // time best address re-confirmed
|
|
|
|
trustBestAddrUntil time.Time // time when bestAddr expires
|
2020-07-01 22:28:14 +00:00
|
|
|
sentPing map[stun.TxID]sentPing
|
|
|
|
endpointState map[netaddr.IPPort]*endpointState
|
2021-01-20 20:41:25 +00:00
|
|
|
isCallMeMaybeEP map[netaddr.IPPort]bool
|
2020-08-09 21:49:42 +00:00
|
|
|
|
|
|
|
pendingCLIPings []pendingCLIPing // any outstanding "tailscale ping" commands running
|
|
|
|
}
|
|
|
|
|
|
|
|
type pendingCLIPing struct {
|
|
|
|
res *ipnstate.PingResult
|
|
|
|
cb func(*ipnstate.PingResult)
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
const (
|
2020-07-03 19:43:39 +00:00
|
|
|
// sessionActiveTimeout is how long since the last activity we
|
|
|
|
// try to keep an established discoEndpoint peering alive.
|
2021-01-20 17:52:24 +00:00
|
|
|
// It's also the idle time at which we stop doing STUN queries to
|
|
|
|
// keep NAT mappings alive.
|
2020-07-03 19:43:39 +00:00
|
|
|
sessionActiveTimeout = 2 * time.Minute
|
|
|
|
|
|
|
|
// upgradeInterval is how often we try to upgrade to a better path
|
|
|
|
// even if we have some non-DERP route that works.
|
|
|
|
upgradeInterval = 1 * time.Minute
|
|
|
|
|
|
|
|
// heartbeatInterval is how often pings to the best UDP address
|
|
|
|
// are sent.
|
|
|
|
heartbeatInterval = 2 * time.Second
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
// discoPingInterval is the minimum time between pings
|
|
|
|
// to an endpoint. (Except in the case of CallMeMaybe frames
|
|
|
|
// resetting the counter, as the first pings likely didn't make it through
|
|
|
|
// the firewall)
|
|
|
|
discoPingInterval = 5 * time.Second
|
|
|
|
|
|
|
|
// pingTimeoutDuration is how long we wait for a pong reply before
|
|
|
|
// assuming it's never coming.
|
|
|
|
pingTimeoutDuration = 5 * time.Second
|
|
|
|
|
|
|
|
// trustUDPAddrDuration is how long we trust a UDP address as the exclusive
|
|
|
|
// path (without using DERP) without having heard a Pong reply.
|
|
|
|
trustUDPAddrDuration = 5 * time.Second
|
2020-07-03 19:43:39 +00:00
|
|
|
|
|
|
|
// goodEnoughLatency is the latency at or under which we don't
|
|
|
|
// try to upgrade to a better path.
|
|
|
|
goodEnoughLatency = 5 * time.Millisecond
|
2021-01-19 23:29:50 +00:00
|
|
|
|
|
|
|
// derpInactiveCleanupTime is how long a non-home DERP connection
|
|
|
|
// needs to be idle (last written to) before we close it.
|
|
|
|
derpInactiveCleanupTime = 60 * time.Second
|
|
|
|
|
|
|
|
// derpCleanStaleInterval is how often cleanStaleDerp runs when there
|
|
|
|
// are potentially-stale DERP connections to close.
|
|
|
|
derpCleanStaleInterval = 15 * time.Second
|
2021-01-20 17:52:24 +00:00
|
|
|
|
|
|
|
// endpointsFreshEnoughDuration is how long we consider a
|
|
|
|
// STUN-derived endpoint valid for. UDP NAT mappings typically
|
|
|
|
// expire at 30 seconds, so this is a few seconds shy of that.
|
|
|
|
endpointsFreshEnoughDuration = 27 * time.Second
|
2020-07-03 18:06:33 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// endpointState is some state and history for a specific endpoint of
|
|
|
|
// a discoEndpoint. (The subject is the discoEndpoint.endpointState
|
|
|
|
// map key)
|
2020-06-30 22:32:19 +00:00
|
|
|
type endpointState struct {
|
2020-08-13 03:12:56 +00:00
|
|
|
// all fields guarded by discoEndpoint.mu
|
|
|
|
|
|
|
|
// lastPing is the last (outgoing) ping time.
|
|
|
|
lastPing time.Time
|
|
|
|
|
|
|
|
// lastGotPing, if non-zero, means that this was an endpoint
|
|
|
|
// that we learned about at runtime (from an incoming ping)
|
|
|
|
// and that is not in the network map. If so, we keep the time
|
|
|
|
// updated and use it to discard old candidates.
|
|
|
|
lastGotPing time.Time
|
|
|
|
|
2021-01-20 20:41:25 +00:00
|
|
|
// callMeMaybeTime, if non-zero, is the time this endpoint
|
|
|
|
// was advertised last via a call-me-maybe disco message.
|
|
|
|
callMeMaybeTime time.Time
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
recentPongs []pongReply // ring buffer up to pongHistoryCount entries
|
2020-08-09 21:49:42 +00:00
|
|
|
recentPong uint16 // index into recentPongs of most recent; older before, wrapped
|
2020-08-13 03:12:56 +00:00
|
|
|
|
|
|
|
index int16 // index in nodecfg.Node.Endpoints; meaningless if lastGotPing non-zero
|
|
|
|
}
|
|
|
|
|
|
|
|
// indexSentinelDeleted is the temporary value that endpointState.index takes while
|
|
|
|
// a discoEndpoint's endpoints are being updated from a new network map.
|
|
|
|
const indexSentinelDeleted = -1
|
|
|
|
|
|
|
|
// shouldDeleteLocked reports whether we should delete this endpoint.
|
|
|
|
func (st *endpointState) shouldDeleteLocked() bool {
|
|
|
|
switch {
|
2021-01-20 20:41:25 +00:00
|
|
|
case !st.callMeMaybeTime.IsZero():
|
|
|
|
return false
|
2020-08-13 03:12:56 +00:00
|
|
|
case st.lastGotPing.IsZero():
|
|
|
|
// This was an endpoint from the network map. Is it still in the network map?
|
|
|
|
return st.index == indexSentinelDeleted
|
|
|
|
default:
|
2021-01-20 20:41:25 +00:00
|
|
|
// This was an endpoint discovered at runtime.
|
2020-08-13 03:12:56 +00:00
|
|
|
return time.Since(st.lastGotPing) > sessionActiveTimeout
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (de *discoEndpoint) deleteEndpointLocked(ep netaddr.IPPort) {
|
|
|
|
delete(de.endpointState, ep)
|
2021-03-23 17:07:34 +00:00
|
|
|
if de.bestAddr.IPPort == ep {
|
|
|
|
de.bestAddr = addrLatency{}
|
2020-08-13 03:12:56 +00:00
|
|
|
}
|
2020-07-03 18:06:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// pongHistoryCount is how many pongReply values we keep per endpointState
|
|
|
|
const pongHistoryCount = 64
|
|
|
|
|
|
|
|
type pongReply struct {
|
|
|
|
latency time.Duration
|
|
|
|
pongAt time.Time // when we received the pong
|
|
|
|
from netaddr.IPPort // the pong's src (usually same as endpoint map key)
|
|
|
|
pongSrc netaddr.IPPort // what they reported they heard
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type sentPing struct {
|
2020-07-18 20:50:08 +00:00
|
|
|
to netaddr.IPPort
|
|
|
|
at time.Time
|
|
|
|
timer *time.Timer // timeout timer
|
|
|
|
purpose discoPingPurpose
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// initFakeUDPAddr populates fakeWGAddr with a globally unique fake UDPAddr.
|
|
|
|
// The current implementation just uses the pointer value of de jammed into an IPv6
|
|
|
|
// address, but it could also be, say, a counter.
|
|
|
|
func (de *discoEndpoint) initFakeUDPAddr() {
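// The fd00::/8 prefix below keeps the fake address inside the IPv6
// unique-local range, so it can't collide with a globally routable
// address.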
|
|
|
|
var addr [16]byte
|
|
|
|
addr[0] = 0xfd
|
|
|
|
addr[1] = 0x00
|
|
|
|
binary.BigEndian.PutUint64(addr[2:], uint64(reflect.ValueOf(de).Pointer()))
|
2021-05-15 01:07:28 +00:00
|
|
|
de.fakeWGAddr = netaddr.IPPortFrom(netaddr.IPFrom16(addr), 12345)
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
2020-08-06 21:57:03 +00:00
|
|
|
// isFirstRecvActivityInAwhile notes that receive activity has occurred for this
|
|
|
|
// endpoint and reports whether it's been at least 10 seconds since the last
|
|
|
|
// receive activity (including having never received from this peer before).
|
|
|
|
func (de *discoEndpoint) isFirstRecvActivityInAwhile() bool {
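// Note: the load/store pair below is not a compare-and-swap, so two
// concurrent callers could both see a stale timestamp and both return
// true; that appears to be acceptable for this coarse activity check.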
|
|
|
|
now := time.Now().Unix()
|
|
|
|
old := atomic.LoadInt64(&de.lastRecvUnixAtomic)
|
|
|
|
if old <= now-10 {
|
|
|
|
atomic.StoreInt64(&de.lastRecvUnixAtomic, now)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2020-07-04 05:26:53 +00:00
|
|
|
// String exists purely so wireguard-go internals can log.Printf("%v")
|
|
|
|
// its internal conn.Endpoints and we don't end up with data races
|
|
|
|
// from fmt (via log) reading mutex fields and such.
|
|
|
|
func (de *discoEndpoint) String() string {
|
|
|
|
return fmt.Sprintf("magicsock.discoEndpoint{%v, %v}", de.publicKey.ShortString(), de.discoShort)
|
|
|
|
}
|
|
|
|
|
2020-06-28 18:53:37 +00:00
|
|
|
func (de *discoEndpoint) ClearSrc() {}
|
|
|
|
func (de *discoEndpoint) SrcToString() string { panic("unused") } // unused by wireguard-go
|
|
|
|
func (de *discoEndpoint) SrcIP() net.IP { panic("unused") } // unused by wireguard-go
|
2021-04-30 23:43:26 +00:00
|
|
|
func (de *discoEndpoint) DstToString() string { return de.wgEndpoint }
|
2020-06-28 18:53:37 +00:00
|
|
|
func (de *discoEndpoint) DstIP() net.IP { panic("unused") }
|
2020-06-30 19:22:42 +00:00
|
|
|
func (de *discoEndpoint) DstToBytes() []byte { return packIPPort(de.fakeWGAddr) }
|
2020-06-28 18:53:37 +00:00
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
// addrForSendLocked returns the address(es) that should be used for
|
|
|
|
// sending the next packet. Zero, one, or both of UDP address and DERP
|
|
|
|
// addr may be non-zero.
|
|
|
|
//
|
|
|
|
// de.mu must be held.
|
|
|
|
func (de *discoEndpoint) addrForSendLocked(now time.Time) (udpAddr, derpAddr netaddr.IPPort) {
|
2021-03-23 17:07:34 +00:00
|
|
|
udpAddr = de.bestAddr.IPPort
|
2020-07-03 18:06:33 +00:00
|
|
|
if udpAddr.IsZero() || now.After(de.trustBestAddrUntil) {
|
|
|
|
// We have no bestAddr, or it has expired, so also send
|
|
|
|
// via DERP as a fallback.
|
|
|
|
derpAddr = de.derpAddr
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-07-03 19:43:39 +00:00
|
|
|
// heartbeat is called every heartbeatInterval to keep the best UDP path alive,
|
|
|
|
// or kick off discovery of other paths.
|
|
|
|
func (de *discoEndpoint) heartbeat() {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
|
|
|
de.heartBeatTimer = nil
|
|
|
|
|
|
|
|
if de.lastSend.IsZero() {
|
|
|
|
// Shouldn't happen.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if time.Since(de.lastSend) > sessionActiveTimeout {
|
|
|
|
// Session's idle. Stop heartbeating.
|
2020-12-21 18:58:06 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: ending heartbeats for idle session to %v (%v)", de.publicKey.ShortString(), de.discoShort)
|
2020-07-03 19:43:39 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
now := time.Now()
|
|
|
|
udpAddr, _ := de.addrForSendLocked(now)
|
|
|
|
if !udpAddr.IsZero() {
|
|
|
|
// We have a preferred path. Ping that every 2 seconds.
|
2020-07-18 20:50:08 +00:00
|
|
|
de.startPingLocked(udpAddr, now, pingHeartbeat)
|
2020-07-03 19:43:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if de.wantFullPingLocked(now) {
|
|
|
|
de.sendPingsLocked(now, true)
|
|
|
|
}
|
|
|
|
|
|
|
|
de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat)
|
|
|
|
}
|
|
|
|
|
|
|
|
// wantFullPingLocked reports whether we should ping to all our peers looking for
|
|
|
|
// a better path.
|
|
|
|
//
|
|
|
|
// de.mu must be held.
|
|
|
|
func (de *discoEndpoint) wantFullPingLocked(now time.Time) bool {
|
|
|
|
if de.bestAddr.IsZero() || de.lastFullPing.IsZero() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if now.After(de.trustBestAddrUntil) {
|
|
|
|
return true
|
|
|
|
}
|
2021-03-23 17:07:34 +00:00
|
|
|
if de.bestAddr.latency <= goodEnoughLatency {
|
2020-07-03 19:43:39 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
if now.Sub(de.lastFullPing) >= upgradeInterval {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (de *discoEndpoint) noteActiveLocked() {
|
|
|
|
de.lastSend = time.Now()
|
|
|
|
if de.heartBeatTimer == nil {
|
|
|
|
de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-09 21:49:42 +00:00
|
|
|
// cliPing starts a ping for the "tailscale ping" command. res is the value to call cb with,
|
|
|
|
// already partially filled.
|
|
|
|
func (de *discoEndpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
|
|
|
de.pendingCLIPings = append(de.pendingCLIPings, pendingCLIPing{res, cb})
|
|
|
|
|
|
|
|
now := time.Now()
|
|
|
|
udpAddr, derpAddr := de.addrForSendLocked(now)
|
|
|
|
if !derpAddr.IsZero() {
|
|
|
|
de.startPingLocked(derpAddr, now, pingCLI)
|
|
|
|
}
|
|
|
|
if !udpAddr.IsZero() && now.Before(de.trustBestAddrUntil) {
|
|
|
|
// Already have an active session, so just ping the address we're using.
|
|
|
|
// Otherwise "tailscale ping" results to a node on the local network
|
|
|
|
// can look like they're bouncing between, say 10.0.0.0/9 and the peer's
|
|
|
|
// IPv6 address, both 1ms away, and it's random who replies first.
|
|
|
|
de.startPingLocked(udpAddr, now, pingCLI)
|
|
|
|
} else {
|
|
|
|
for ep := range de.endpointState {
|
|
|
|
de.startPingLocked(ep, now, pingCLI)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
de.noteActiveLocked()
|
|
|
|
}
|
|
|
|
|
2020-06-28 18:53:37 +00:00
|
|
|
func (de *discoEndpoint) send(b []byte) error {
|
2020-06-30 22:32:19 +00:00
|
|
|
now := time.Now()
|
|
|
|
|
2020-06-28 18:53:37 +00:00
|
|
|
de.mu.Lock()
|
2020-07-03 18:06:33 +00:00
|
|
|
udpAddr, derpAddr := de.addrForSendLocked(now)
|
|
|
|
if udpAddr.IsZero() || now.After(de.trustBestAddrUntil) {
|
2020-07-01 22:28:14 +00:00
|
|
|
de.sendPingsLocked(now, true)
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
2020-07-03 19:43:39 +00:00
|
|
|
de.noteActiveLocked()
|
2020-06-28 18:53:37 +00:00
|
|
|
de.mu.Unlock()
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
if udpAddr.IsZero() && derpAddr.IsZero() {
|
|
|
|
return errors.New("no UDP or DERP addr")
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
2020-07-03 18:06:33 +00:00
|
|
|
var err error
|
|
|
|
if !udpAddr.IsZero() {
|
2020-07-23 22:15:28 +00:00
|
|
|
_, err = de.c.sendAddr(udpAddr, key.Public(de.publicKey), b)
|
2020-07-03 18:06:33 +00:00
|
|
|
}
|
|
|
|
if !derpAddr.IsZero() {
|
2020-07-23 22:15:28 +00:00
|
|
|
if ok, _ := de.c.sendAddr(derpAddr, key.Public(de.publicKey), b); ok && err != nil {
|
2020-07-03 18:06:33 +00:00
|
|
|
// UDP failed but DERP worked, so good enough:
|
|
|
|
return nil
|
|
|
|
}
|
2020-07-01 22:28:14 +00:00
|
|
|
}
|
2020-07-01 21:39:21 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-24 18:18:35 +00:00
|
|
|
func (de *discoEndpoint) pingTimeout(txid stun.TxID) {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
sp, ok := de.sentPing[txid]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if debugDisco || de.bestAddr.IsZero() || time.Now().After(de.trustBestAddrUntil) {
|
2020-12-21 18:58:06 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort)
|
2020-07-24 18:18:35 +00:00
|
|
|
}
|
|
|
|
de.removeSentPingLocked(txid, sp)
|
|
|
|
}
|
|
|
|
|
2020-07-01 21:39:21 +00:00
|
|
|
// forgetPing is called by a timer when a ping either fails to send or
|
|
|
|
// has taken too long to get a pong reply.
|
|
|
|
func (de *discoEndpoint) forgetPing(txid stun.TxID) {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
if sp, ok := de.sentPing[txid]; ok {
|
2020-07-03 05:48:12 +00:00
|
|
|
de.removeSentPingLocked(txid, sp)
|
2020-07-01 21:39:21 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-03 05:48:12 +00:00
|
|
|
func (de *discoEndpoint) removeSentPingLocked(txid stun.TxID, sp sentPing) {
|
|
|
|
// Stop the timer for the case where sendPing failed to write to UDP.
|
|
|
|
// In the case of a timer already having fired, this is a no-op:
|
|
|
|
sp.timer.Stop()
|
|
|
|
delete(de.sentPing, txid)
|
|
|
|
}
|
|
|
|
|
2020-07-03 19:43:39 +00:00
|
|
|
// sendDiscoPing sends a ping with the provided txid to ep.
|
|
|
|
//
|
|
|
|
// The caller (startPingLocked) should've already recorded the ping in
|
|
|
|
// sentPing and set up the timer.
|
2020-07-18 20:50:08 +00:00
|
|
|
func (de *discoEndpoint) sendDiscoPing(ep netaddr.IPPort, txid stun.TxID, logLevel discoLogLevel) {
|
|
|
|
sent, _ := de.sendDiscoMessage(ep, &disco.Ping{TxID: [12]byte(txid)}, logLevel)
|
2020-07-01 21:39:21 +00:00
|
|
|
if !sent {
|
|
|
|
de.forgetPing(txid)
|
|
|
|
}
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
// discoPingPurpose is the reason why a discovery ping message was sent.
|
|
|
|
type discoPingPurpose int
|
|
|
|
|
2020-09-11 23:26:58 +00:00
|
|
|
//go:generate stringer -type=discoPingPurpose -trimprefix=ping
|
2020-07-18 20:50:08 +00:00
|
|
|
const (
|
|
|
|
// pingDiscovery means that purpose of a ping was to see if a
|
|
|
|
// path was valid.
|
|
|
|
pingDiscovery discoPingPurpose = iota
|
|
|
|
|
|
|
|
// pingHeartbeat means that purpose of a ping was whether a
|
|
|
|
// peer was still there.
|
|
|
|
pingHeartbeat
|
2020-08-09 21:49:42 +00:00
|
|
|
|
|
|
|
// pingCLI means that the user is running "tailscale ping"
|
|
|
|
// from the CLI. These types of pings can go over DERP.
|
|
|
|
pingCLI
|
2020-07-18 20:50:08 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now time.Time, purpose discoPingPurpose) {
|
2020-08-09 21:49:42 +00:00
|
|
|
if purpose != pingCLI {
|
|
|
|
st, ok := de.endpointState[ep]
|
|
|
|
if !ok {
|
|
|
|
// Shouldn't happen. But don't ping an endpoint that's
|
|
|
|
// not active for us.
|
|
|
|
de.c.logf("magicsock: disco: [unexpected] attempt to ping no longer live endpoint %v", ep)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
st.lastPing = now
|
2020-07-03 19:43:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
txid := stun.NewTxID()
|
|
|
|
de.sentPing[txid] = sentPing{
|
2020-07-24 18:18:35 +00:00
|
|
|
to: ep,
|
|
|
|
at: now,
|
|
|
|
timer: time.AfterFunc(pingTimeoutDuration, func() { de.pingTimeout(txid) }),
|
2020-07-18 20:50:08 +00:00
|
|
|
purpose: purpose,
|
|
|
|
}
|
|
|
|
logLevel := discoLog
|
|
|
|
if purpose == pingHeartbeat {
|
|
|
|
logLevel = discoVerboseLog
|
2020-07-03 19:43:39 +00:00
|
|
|
}
|
2020-07-18 20:50:08 +00:00
|
|
|
go de.sendDiscoPing(ep, txid, logLevel)
|
2020-07-03 19:43:39 +00:00
|
|
|
}
|
|
|
|
|
2020-07-01 22:28:14 +00:00
|
|
|
func (de *discoEndpoint) sendPingsLocked(now time.Time, sendCallMeMaybe bool) {
|
2020-07-03 19:43:39 +00:00
|
|
|
de.lastFullPing = now
|
2020-07-03 05:48:12 +00:00
|
|
|
var sentAny bool
|
2020-07-01 19:56:17 +00:00
|
|
|
for ep, st := range de.endpointState {
|
2020-08-13 03:12:56 +00:00
|
|
|
if st.shouldDeleteLocked() {
|
|
|
|
de.deleteEndpointLocked(ep)
|
|
|
|
continue
|
|
|
|
}
|
2020-07-03 18:06:33 +00:00
|
|
|
if !st.lastPing.IsZero() && now.Sub(st.lastPing) < discoPingInterval {
|
2020-07-01 19:56:17 +00:00
|
|
|
continue
|
|
|
|
}
|
2020-07-03 05:48:12 +00:00
|
|
|
|
|
|
|
firstPing := !sentAny
|
|
|
|
sentAny = true
|
|
|
|
|
|
|
|
if firstPing && sendCallMeMaybe {
|
2020-12-21 18:58:06 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: send, starting discovery for %v (%v)", de.publicKey.ShortString(), de.discoShort)
|
2020-07-03 05:48:12 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
de.startPingLocked(ep, now, pingDiscovery)
|
2020-07-01 19:56:17 +00:00
|
|
|
}
|
|
|
|
derpAddr := de.derpAddr
|
2020-07-03 05:48:12 +00:00
|
|
|
if sentAny && sendCallMeMaybe && !derpAddr.IsZero() {
|
2021-01-20 20:41:25 +00:00
|
|
|
// Have our magicsock.Conn figure out its STUN endpoint (if
|
|
|
|
// it doesn't know already) and then send a CallMeMaybe
|
|
|
|
// message to our peer via DERP informing them that we've
|
|
|
|
// sent so our firewall ports are probably open and now
|
|
|
|
// would be a good time for them to connect.
|
|
|
|
go de.c.enqueueCallMeMaybe(derpAddr, de)
|
2020-07-01 19:56:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
func (de *discoEndpoint) sendDiscoMessage(dst netaddr.IPPort, dm disco.Message, logLevel discoLogLevel) (sent bool, err error) {
|
|
|
|
return de.c.sendDiscoMessage(dst, de.publicKey, de.discoKey, dm, logLevel)
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (de *discoEndpoint) updateFromNode(n *tailcfg.Node) {
|
|
|
|
if n == nil {
|
|
|
|
// TODO: log, error, count? if this even happens.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
|
|
|
if n.DERP == "" {
|
2020-06-30 19:22:42 +00:00
|
|
|
de.derpAddr = netaddr.IPPort{}
|
2020-06-28 18:53:37 +00:00
|
|
|
} else {
|
2020-06-30 19:22:42 +00:00
|
|
|
de.derpAddr, _ = netaddr.ParseIPPort(n.DERP)
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
2020-06-30 22:32:19 +00:00
|
|
|
for _, st := range de.endpointState {
|
2020-08-13 03:12:56 +00:00
|
|
|
st.index = indexSentinelDeleted // assume deleted until updated in next loop
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
for i, epStr := range n.Endpoints {
|
2020-07-03 18:06:33 +00:00
|
|
|
if i > math.MaxInt16 {
|
|
|
|
// Seems unlikely.
|
|
|
|
continue
|
|
|
|
}
|
2020-06-30 22:32:19 +00:00
|
|
|
ipp, err := netaddr.ParseIPPort(epStr)
|
|
|
|
if err != nil {
|
|
|
|
de.c.logf("magicsock: bogus netmap endpoint %q", epStr)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if st, ok := de.endpointState[ipp]; ok {
|
2020-07-03 18:06:33 +00:00
|
|
|
st.index = int16(i)
|
2020-06-30 22:32:19 +00:00
|
|
|
} else {
|
2020-07-03 18:06:33 +00:00
|
|
|
de.endpointState[ipp] = &endpointState{index: int16(i)}
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
}
|
2020-08-13 03:12:56 +00:00
|
|
|
|
|
|
|
// Now delete anything unless it's still in the network map or
|
|
|
|
// was a recently discovered endpoint.
|
|
|
|
for ep, st := range de.endpointState {
|
|
|
|
if st.shouldDeleteLocked() {
|
|
|
|
de.deleteEndpointLocked(ep)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// addCandidateEndpoint adds ep as an endpoint to which we should send
|
|
|
|
// future pings.
|
|
|
|
//
|
|
|
|
// This is called once we've already verified that we got a valid
|
|
|
|
// discovery message from de via ep.
|
|
|
|
func (de *discoEndpoint) addCandidateEndpoint(ep netaddr.IPPort) {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
|
|
|
if st, ok := de.endpointState[ep]; ok {
|
|
|
|
if st.lastGotPing.IsZero() {
|
|
|
|
// Already-known endpoint from the network map.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
st.lastGotPing = time.Now()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Newly discovered endpoint. Exciting!
|
2021-03-19 20:18:02 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: adding %v as candidate endpoint for %v (%s)", ep, de.discoShort, de.publicKey.ShortString())
|
2020-08-13 03:12:56 +00:00
|
|
|
de.endpointState[ep] = &endpointState{
|
|
|
|
lastGotPing: time.Now(),
|
|
|
|
}
|
|
|
|
|
|
|
|
// If for some reason this gets very large, do some cleanup.
|
|
|
|
if size := len(de.endpointState); size > 100 {
|
|
|
|
for ep, st := range de.endpointState {
|
|
|
|
if st.shouldDeleteLocked() {
|
|
|
|
de.deleteEndpointLocked(ep)
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
|
|
|
}
|
2020-08-13 03:12:56 +00:00
|
|
|
size2 := len(de.endpointState)
|
2021-03-19 20:18:02 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: addCandidateEndpoint pruned %v candidate set from %v to %v entries", size, size2)
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// noteConnectivityChange is called when connectivity changes enough
|
|
|
|
// that we should question our earlier assumptions about which paths
|
|
|
|
// work.
|
|
|
|
func (de *discoEndpoint) noteConnectivityChange() {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
2020-07-01 22:28:14 +00:00
|
|
|
de.trustBestAddrUntil = time.Time{}
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
|
|
|
|
2020-07-02 05:15:41 +00:00
|
|
|
// handlePongConnLocked handles a Pong message (a reply to an earlier ping).
|
|
|
|
// It should be called with the Conn.mu held.
|
|
|
|
func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort) {
|
2020-07-01 19:56:17 +00:00
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
2021-05-15 01:07:28 +00:00
|
|
|
isDerp := src.IP() == derpMagicIPAddr
|
2020-07-02 05:15:41 +00:00
|
|
|
|
2020-07-01 19:56:17 +00:00
|
|
|
sp, ok := de.sentPing[m.TxID]
|
|
|
|
if !ok {
|
|
|
|
// This is not a pong for a ping we sent. Ignore.
|
|
|
|
return
|
|
|
|
}
|
2020-07-03 05:48:12 +00:00
|
|
|
de.removeSentPingLocked(m.TxID, sp)
|
2020-07-01 19:56:17 +00:00
|
|
|
|
|
|
|
now := time.Now()
|
2020-07-03 18:06:33 +00:00
|
|
|
latency := now.Sub(sp.at)
|
|
|
|
|
2020-08-09 21:49:42 +00:00
|
|
|
if !isDerp {
|
|
|
|
st, ok := de.endpointState[sp.to]
|
|
|
|
if !ok {
|
|
|
|
// This is no longer an endpoint we care about.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
de.c.setAddrToDiscoLocked(src, de.discoKey, de)
|
|
|
|
|
|
|
|
st.addPongReplyLocked(pongReply{
|
|
|
|
latency: latency,
|
|
|
|
pongAt: now,
|
|
|
|
from: src,
|
|
|
|
pongSrc: m.Src,
|
|
|
|
})
|
|
|
|
}
|
2020-07-03 05:48:12 +00:00
|
|
|
|
2020-07-18 20:50:08 +00:00
|
|
|
if sp.purpose != pingHeartbeat {
|
2020-12-21 18:58:06 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
|
2020-07-18 20:50:08 +00:00
|
|
|
if sp.to != src {
|
|
|
|
fmt.Fprintf(bw, " ping.to=%v", sp.to)
|
|
|
|
}
|
|
|
|
}))
|
|
|
|
}
|
2020-07-01 19:56:17 +00:00
|
|
|
|
2020-08-09 21:49:42 +00:00
|
|
|
for _, pp := range de.pendingCLIPings {
|
|
|
|
de.c.populateCLIPingResponseLocked(pp.res, latency, sp.to)
|
|
|
|
go pp.cb(pp.res)
|
|
|
|
}
|
|
|
|
de.pendingCLIPings = nil
|
|
|
|
|
2020-07-01 19:56:17 +00:00
|
|
|
// Promote this pong response to our current best address if it's lower latency.
|
|
|
|
// TODO(bradfitz): decide how latency vs. preference order affects decision
|
2020-08-09 21:49:42 +00:00
|
|
|
if !isDerp {
|
2021-03-23 17:07:34 +00:00
|
|
|
thisPong := addrLatency{sp.to, latency}
|
|
|
|
if betterAddr(thisPong, de.bestAddr) {
|
|
|
|
de.c.logf("magicsock: disco: node %v %v now using %v", de.publicKey.ShortString(), de.discoShort, sp.to)
|
|
|
|
de.bestAddr = thisPong
|
2020-08-09 21:49:42 +00:00
|
|
|
}
|
2021-03-23 17:07:34 +00:00
|
|
|
if de.bestAddr.IPPort == thisPong.IPPort {
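// The pong came from the address we're now using (either it was
// already bestAddr or was just promoted above), so refresh its
// latency and extend the trust window.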
|
|
|
|
de.bestAddr.latency = latency
|
2020-08-09 21:49:42 +00:00
|
|
|
de.bestAddrAt = now
|
|
|
|
de.trustBestAddrUntil = now.Add(trustUDPAddrDuration)
|
2020-07-02 18:37:19 +00:00
|
|
|
}
|
2020-07-03 18:06:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-23 17:07:34 +00:00
|
|
|
// addrLatency is an IPPort with an associated latency.
|
|
|
|
type addrLatency struct {
|
|
|
|
netaddr.IPPort
|
|
|
|
latency time.Duration
|
|
|
|
}
|
|
|
|
|
|
|
|
// betterAddr reports whether a is a better addr to use than b.
|
|
|
|
func betterAddr(a, b addrLatency) bool {
|
|
|
|
if a.IPPort == b.IPPort {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if b.IsZero() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if a.IsZero() {
|
|
|
|
return false
|
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
if a.IP().Is6() && b.IP().Is4() {
|
2021-03-23 17:17:19 +00:00
|
|
|
// Prefer IPv6 for being a bit more robust, as long as
|
|
|
|
// the latencies are roughly equivalent.
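// Concretely, the IPv6 path wins unless its latency is more than
// roughly 11% higher than the IPv4 path's (0.9*a.latency must still
// be below b.latency).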
|
|
|
|
if a.latency/10*9 < b.latency {
|
|
|
|
return true
|
|
|
|
}
|
2021-05-15 01:07:28 +00:00
|
|
|
} else if a.IP().Is4() && b.IP().Is6() {
|
2021-03-23 17:17:19 +00:00
|
|
|
if betterAddr(b, a) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
2021-03-23 17:07:34 +00:00
|
|
|
return a.latency < b.latency
|
|
|
|
}
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
// discoEndpoint.mu must be held.
|
|
|
|
func (st *endpointState) addPongReplyLocked(r pongReply) {
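// recentPongs grows to at most pongHistoryCount entries and is then
// reused as a ring buffer, with recentPong indexing the most recently
// written slot.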
|
|
|
|
if n := len(st.recentPongs); n < pongHistoryCount {
|
|
|
|
st.recentPong = uint16(n)
|
|
|
|
st.recentPongs = append(st.recentPongs, r)
|
|
|
|
return
|
2020-07-01 19:56:17 +00:00
|
|
|
}
|
2020-07-03 18:06:33 +00:00
|
|
|
i := st.recentPong + 1
|
|
|
|
if i == pongHistoryCount {
|
|
|
|
i = 0
|
|
|
|
}
|
|
|
|
st.recentPongs[i] = r
|
|
|
|
st.recentPong = i
|
2020-07-01 19:56:17 +00:00
|
|
|
}
|
|
|
|
|
2020-07-01 22:28:14 +00:00
|
|
|
// handleCallMeMaybe handles a CallMeMaybe discovery message via
|
|
|
|
// DERP. The contract for use of this message is that the peer has
|
|
|
|
// already sent to us via UDP, so their stateful firewall should be
|
|
|
|
// open. Now we can Ping back and make it through.
|
2021-01-20 20:41:25 +00:00
|
|
|
func (de *discoEndpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
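// Track the advertised endpoints with a mark-and-sweep over
// isCallMeMaybeEP: existing entries are first marked false, entries in
// this message are (re)marked true, and whatever is still false
// afterwards is removed.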
|
2020-07-01 22:28:14 +00:00
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
2021-01-20 20:41:25 +00:00
|
|
|
now := time.Now()
|
|
|
|
for ep := range de.isCallMeMaybeEP {
|
|
|
|
de.isCallMeMaybeEP[ep] = false // mark for deletion
|
|
|
|
}
|
|
|
|
if de.isCallMeMaybeEP == nil {
|
|
|
|
de.isCallMeMaybeEP = map[netaddr.IPPort]bool{}
|
|
|
|
}
|
2021-01-21 16:05:07 +00:00
|
|
|
var newEPs []netaddr.IPPort
|
2021-01-20 20:41:25 +00:00
|
|
|
for _, ep := range m.MyNumber {
|
2021-05-15 01:07:28 +00:00
|
|
|
if ep.IP().Is6() && ep.IP().IsLinkLocalUnicast() {
|
2021-01-21 16:05:07 +00:00
|
|
|
// We send these out, but ignore them for now.
|
|
|
|
// TODO: teach the ping code to ping on all interfaces
|
|
|
|
// for these.
|
|
|
|
continue
|
|
|
|
}
|
2021-01-20 20:41:25 +00:00
|
|
|
de.isCallMeMaybeEP[ep] = true
|
|
|
|
if es, ok := de.endpointState[ep]; ok {
|
|
|
|
es.callMeMaybeTime = now
|
|
|
|
} else {
|
|
|
|
de.endpointState[ep] = &endpointState{callMeMaybeTime: now}
|
2021-01-21 16:05:07 +00:00
|
|
|
newEPs = append(newEPs, ep)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(newEPs) > 0 {
|
2021-03-19 20:18:02 +00:00
|
|
|
de.c.logf("[v1] magicsock: disco: call-me-maybe from %v %v added new endpoints: %v",
|
2021-01-21 16:05:07 +00:00
|
|
|
de.publicKey.ShortString(), de.discoShort,
|
|
|
|
logger.ArgWriter(func(w *bufio.Writer) {
|
|
|
|
for i, ep := range newEPs {
|
|
|
|
if i > 0 {
|
|
|
|
w.WriteString(", ")
|
|
|
|
}
|
|
|
|
w.WriteString(ep.String())
|
|
|
|
}
|
|
|
|
}))
|
2021-01-20 20:41:25 +00:00
|
|
|
}
|
2021-01-21 16:05:07 +00:00
|
|
|
|
2021-01-20 20:41:25 +00:00
|
|
|
// Delete any prior CallMeMaybe endpoints that weren't included
|
|
|
|
// in this message.
|
|
|
|
for ep, want := range de.isCallMeMaybeEP {
|
|
|
|
if !want {
|
|
|
|
delete(de.isCallMeMaybeEP, ep)
|
|
|
|
de.deleteEndpointLocked(ep)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-01 22:28:14 +00:00
|
|
|
// Zero out all the lastPing times to force sendPingsLocked to send new ones,
|
|
|
|
// even if it's been less than 5 seconds ago.
|
|
|
|
for _, st := range de.endpointState {
|
|
|
|
st.lastPing = time.Time{}
|
|
|
|
}
|
|
|
|
de.sendPingsLocked(time.Now(), false)
|
|
|
|
}
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
func (de *discoEndpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {
|
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
|
|
|
if de.lastSend.IsZero() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-07-03 20:44:22 +00:00
|
|
|
ps.LastWrite = de.lastSend
|
|
|
|
|
2020-07-03 18:06:33 +00:00
|
|
|
now := time.Now()
|
2020-07-03 20:44:22 +00:00
|
|
|
if udpAddr, derpAddr := de.addrForSendLocked(now); !udpAddr.IsZero() && derpAddr.IsZero() {
|
2020-07-03 18:06:33 +00:00
|
|
|
ps.CurAddr = udpAddr.String()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-30 20:48:32 +00:00
|
|
|
// stopAndReset stops timers associated with de and resets its state back to zero.
|
|
|
|
// It's called when a discovery endpoint is no longer present in the NetworkMap,
|
|
|
|
// or when magicsock is transitioning from the running to the stopped state (via SetPrivateKey(zero)).
|
|
|
|
func (de *discoEndpoint) stopAndReset() {
|
2021-01-18 23:27:44 +00:00
|
|
|
atomic.AddInt64(&de.numStopAndResetAtomic, 1)
|
2020-06-28 18:53:37 +00:00
|
|
|
de.mu.Lock()
|
|
|
|
defer de.mu.Unlock()
|
|
|
|
|
2020-12-21 18:58:06 +00:00
|
|
|
de.c.logf("[v1] magicsock: doing cleanup for discovery key %x", de.discoKey[:])
|
2020-06-30 22:32:19 +00:00
|
|
|
|
2020-07-30 20:48:32 +00:00
|
|
|
// Zero these fields so if the user re-starts the network, the discovery
|
|
|
|
// state isn't a mix of before & after two sessions.
|
|
|
|
de.lastSend = time.Time{}
|
|
|
|
de.lastFullPing = time.Time{}
|
2021-03-23 17:07:34 +00:00
|
|
|
de.bestAddr = addrLatency{}
|
2020-07-30 20:48:32 +00:00
|
|
|
de.bestAddrAt = time.Time{}
|
|
|
|
de.trustBestAddrUntil = time.Time{}
|
|
|
|
for _, es := range de.endpointState {
|
|
|
|
es.lastPing = time.Time{}
|
|
|
|
}
|
|
|
|
|
2020-07-03 18:45:41 +00:00
|
|
|
for txid, sp := range de.sentPing {
|
|
|
|
de.removeSentPingLocked(txid, sp)
|
2020-06-30 22:32:19 +00:00
|
|
|
}
|
2020-07-03 19:43:39 +00:00
|
|
|
if de.heartBeatTimer != nil {
|
|
|
|
de.heartBeatTimer.Stop()
|
|
|
|
de.heartBeatTimer = nil
|
|
|
|
}
|
2020-08-09 21:49:42 +00:00
|
|
|
de.pendingCLIPings = nil
|
2020-06-28 18:53:37 +00:00
|
|
|
}
|
2020-06-30 21:37:35 +00:00
|
|
|
|
2021-01-18 23:27:44 +00:00
|
|
|
func (de *discoEndpoint) numStopAndReset() int64 {
|
|
|
|
return atomic.LoadInt64(&de.numStopAndResetAtomic)
|
|
|
|
}
|
|
|
|
|
2020-07-02 17:48:13 +00:00
|
|
|
// derpStr replaces DERP IPs in s with "derp-".
|
|
|
|
func derpStr(s string) string { return strings.ReplaceAll(s, "127.3.3.40:", "derp-") }
|
2020-09-03 22:45:41 +00:00
|
|
|
|
2021-01-18 23:27:44 +00:00
|
|
|
// ippEndpointCache is a mutex-free single-element cache, mapping from
|
|
|
|
// a single netaddr.IPPort to a single endpoint.
|
|
|
|
type ippEndpointCache struct {
|
|
|
|
ipp netaddr.IPPort
|
|
|
|
gen int64
|
|
|
|
de *discoEndpoint
|
|
|
|
}
|