// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package magicsock

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"crypto/tls"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"net/netip"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
	"unsafe"

	qt "github.com/frankban/quicktest"
	wgconn "github.com/tailscale/wireguard-go/conn"
	"github.com/tailscale/wireguard-go/device"
	"github.com/tailscale/wireguard-go/tun/tuntest"
	"go4.org/mem"
	xmaps "golang.org/x/exp/maps"
	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
	"tailscale.com/cmd/testwrapper/flakytest"
	"tailscale.com/control/controlknobs"
	"tailscale.com/derp"
	"tailscale.com/derp/derphttp"
	"tailscale.com/disco"
	"tailscale.com/envknob"
	"tailscale.com/health"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/net/connstats"
	"tailscale.com/net/netaddr"
	"tailscale.com/net/netcheck"
	"tailscale.com/net/netmon"
	"tailscale.com/net/packet"
	"tailscale.com/net/ping"
	"tailscale.com/net/stun/stuntest"
	"tailscale.com/net/tstun"
	"tailscale.com/tailcfg"
	"tailscale.com/tstest"
	"tailscale.com/tstest/natlab"
	"tailscale.com/tstime/mono"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/netlogtype"
	"tailscale.com/types/netmap"
	"tailscale.com/types/nettype"
	"tailscale.com/types/ptr"
	"tailscale.com/util/cibuild"
	"tailscale.com/util/must"
	"tailscale.com/util/racebuild"
	"tailscale.com/util/set"
	"tailscale.com/util/usermetric"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/wgcfg"
	"tailscale.com/wgengine/wgcfg/nmcfg"
	"tailscale.com/wgengine/wglog"
)

func init() {
	os.Setenv("IN_TS_TEST", "1")

	// Some of these tests lose a disco pong before establishing a
	// direct connection, so instead of waiting 5 seconds in the
	// test, reduce the wait period.
	// (In particular, TestActiveDiscovery.)
	discoPingInterval = 100 * time.Millisecond
	pingTimeoutDuration = 100 * time.Millisecond
}

// WaitReady waits until the magicsock is entirely initialized and connected
// to its home DERP server. This is normally not necessary, since magicsock
// is intended to be entirely asynchronous, but it helps eliminate race
// conditions in tests. In particular, you can't expect two test magicsocks
// to be able to connect to each other through a test DERP unless they are
// both fully initialized before you try.
func (c *Conn) WaitReady(t testing.TB) {
	t.Helper()
	timer := time.NewTimer(10 * time.Second)
	defer timer.Stop()
	select {
	case <-c.derpStarted:
		return
	case <-c.connCtx.Done():
		t.Fatalf("magicsock.Conn closed while waiting for readiness")
	case <-timer.C:
		t.Fatalf("timeout waiting for readiness")
	}
}

// runDERPAndStun starts a local DERP server (behind a TLS httptest server) and
// a STUN server on l, and returns a single-region DERPMap pointing at them,
// plus a cleanup func that shuts both down.
func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
	d := derp.NewServer(key.NewNode(), logf)

	httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d))
	httpsrv.Config.ErrorLog = logger.StdLogger(logf)
	httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
	httpsrv.StartTLS()

	stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l)

	m := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				RegionID:   1,
				RegionCode: "test",
				Nodes: []*tailcfg.DERPNode{
					{
						Name:             "t1",
						RegionID:         1,
						HostName:         "test-node.unused",
						IPv4:             "127.0.0.1",
						IPv6:             "none",
						STUNPort:         stunAddr.Port,
						DERPPort:         httpsrv.Listener.Addr().(*net.TCPAddr).Port,
						InsecureForTests: true,
						STUNTestIP:       stunIP.String(),
					},
				},
			},
		},
	}

	cleanup = func() {
		httpsrv.CloseClientConnections()
		httpsrv.Close()
		d.Close()
		stunCleanup()
	}

	return m, cleanup
}
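
// sketchLocalDERPAndStun is an illustrative sketch, not invoked by any test in
// this file: it shows the minimal wiring most tests below rely on, a loopback
// DERP+STUN pair from runDERPAndStun with its cleanup registered on the test.
// The function name is hypothetical; everything it calls is defined here.
func sketchLocalDERPAndStun(t *testing.T) *tailcfg.DERPMap {
	derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1))
	t.Cleanup(cleanup) // shut down the DERP server and STUN listener when the test ends
	return derpMap
}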

// magicStack is a magicsock, plus all the stuff around it that's
// necessary to send and receive packets to test e2e wireguard
// happiness.
type magicStack struct {
	privateKey key.NodePrivate
	epCh       chan []tailcfg.Endpoint // endpoint updates produced by this peer
	stats      *connstats.Statistics   // per-connection statistics
	conn       *Conn                   // the magicsock itself
	tun        *tuntest.ChannelTUN     // TUN device to send/receive packets
	tsTun      *tstun.Wrapper          // wrapped tun that implements filtering and wgengine hooks
	dev        *device.Device          // the wireguard-go Device that connects the previous things
	wgLogger   *wglog.Logger           // wireguard-go log wrapper
	netMon     *netmon.Monitor         // always non-nil
	metrics    *usermetric.Registry
}

// newMagicStack builds and initializes an idle magicsock and
// friends. You need to call conn.SetNetworkMap and dev.Reconfig
// before anything interesting happens.
func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack {
	privateKey := key.NewNode()
	return newMagicStackWithKey(t, logf, l, derpMap, privateKey)
}

func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack {
	t.Helper()

	netMon, err := netmon.New(logf)
	if err != nil {
		t.Fatalf("netmon.New: %v", err)
	}
	ht := new(health.Tracker)

	var reg usermetric.Registry
	epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary
	conn, err := NewConn(Options{
		NetMon:                 netMon,
		Metrics:                &reg,
		Logf:                   logf,
		HealthTracker:          ht,
		DisablePortMapper:      true,
		TestOnlyPacketListener: l,
		EndpointsFunc: func(eps []tailcfg.Endpoint) {
			epCh <- eps
		},
	})
	if err != nil {
		t.Fatalf("constructing magicsock: %v", err)
	}
	conn.SetDERPMap(derpMap)
	if err := conn.SetPrivateKey(privateKey); err != nil {
		t.Fatalf("setting private key in magicsock: %v", err)
	}

	tun := tuntest.NewChannelTUN()
	tsTun := tstun.Wrap(logf, tun.TUN(), &reg)
	tsTun.SetFilter(filter.NewAllowAllForTest(logf))
	tsTun.Start()

	wgLogger := wglog.NewLogger(logf)
	dev := wgcfg.NewDevice(tsTun, conn.Bind(), wgLogger.DeviceLogger)
	dev.Up()

	// Wait for magicsock to connect up to DERP.
	conn.WaitReady(t)

	// Wait for first endpoint update to be available.
	deadline := time.Now().Add(2 * time.Second)
	for len(epCh) == 0 && time.Now().Before(deadline) {
		time.Sleep(100 * time.Millisecond)
	}

	return &magicStack{
		privateKey: privateKey,
		epCh:       epCh,
		conn:       conn,
		tun:        tun,
		tsTun:      tsTun,
		dev:        dev,
		wgLogger:   wgLogger,
		netMon:     netMon,
		metrics:    &reg,
	}
}

func (s *magicStack) Reconfig(cfg *wgcfg.Config) error {
	s.tsTun.SetWGConfig(cfg)
	s.wgLogger.SetPeers(cfg.Peers)
	return wgcfg.ReconfigDevice(s.dev, cfg, s.conn.logf)
}

func (s *magicStack) String() string {
	pub := s.Public()
	return pub.ShortString()
}

func (s *magicStack) Close() {
	s.dev.Close()
	s.conn.Close()
	s.netMon.Close()
}

func (s *magicStack) Public() key.NodePublic {
	return s.privateKey.Public()
}

// Status returns a subset of the ipnstate.Status, only involving
// the magicsock-specific parts.
func (s *magicStack) Status() *ipnstate.Status {
	var sb ipnstate.StatusBuilder
	sb.WantPeers = true
	s.conn.UpdateStatus(&sb)
	return sb.Status()
}

// IP returns the Tailscale IP address assigned to this magicStack.
//
// Something external needs to provide a NetworkMap and WireGuard
// configs to the magicStack in order for it to acquire an IP
// address. See meshStacks for one possible source of netmaps and IPs.
func (s *magicStack) IP() netip.Addr {
	for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
		s.conn.mu.Lock()
		addr := s.conn.firstAddrForTest
		s.conn.mu.Unlock()
		if addr.IsValid() {
			return addr
		}
	}
	panic("timed out waiting for magicstack to get an IP assigned")
}

// meshStacks monitors epCh on all given ms, and plumbs network maps
// and WireGuard configs into everyone to form a full mesh that has up
// to date endpoint info. Think of it as an extremely stripped down
// and purpose-built Tailscale control plane.
func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkMap), ms ...*magicStack) (cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())

	// Serialize all reconfigurations globally, just to keep things
	// simpler.
	var (
		mu  sync.Mutex
		eps = make([][]tailcfg.Endpoint, len(ms))
	)

	buildNetmapLocked := func(myIdx int) *netmap.NetworkMap {
		me := ms[myIdx]
		nm := &netmap.NetworkMap{
			PrivateKey: me.privateKey,
			NodeKey:    me.privateKey.Public(),
			SelfNode: (&tailcfg.Node{
				Addresses: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(1, 0, 0, byte(myIdx+1)), 32)},
			}).View(),
		}
		for i, peer := range ms {
			if i == myIdx {
				continue
			}
			addrs := []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(1, 0, 0, byte(i+1)), 32)}
			peer := &tailcfg.Node{
				ID:         tailcfg.NodeID(i + 1),
				Name:       fmt.Sprintf("node%d", i+1),
				Key:        peer.privateKey.Public(),
				DiscoKey:   peer.conn.DiscoPublicKey(),
				Addresses:  addrs,
				AllowedIPs: addrs,
				Endpoints:  epFromTyped(eps[i]),
				DERP:       "127.3.3.40:1",
			}
			nm.Peers = append(nm.Peers, peer.View())
		}

		if mutateNetmap != nil {
			mutateNetmap(myIdx, nm)
		}
		return nm
	}

	updateEps := func(idx int, newEps []tailcfg.Endpoint) {
		mu.Lock()
		defer mu.Unlock()

		eps[idx] = newEps

		for i, m := range ms {
			nm := buildNetmapLocked(i)
			m.conn.SetNetworkMap(nm)
			peerSet := make(set.Set[key.NodePublic], len(nm.Peers))
			for _, peer := range nm.Peers {
				peerSet.Add(peer.Key())
			}
			m.conn.UpdatePeers(peerSet)
			wg, err := nmcfg.WGCfg(nm, logf, 0, "")
			if err != nil {
				// We're too far from the *testing.T to be graceful,
				// blow up. Shouldn't happen anyway.
				panic(fmt.Sprintf("failed to construct wgcfg from netmap: %v", err))
			}
			if err := m.Reconfig(wg); err != nil {
				if ctx.Err() != nil || errors.Is(err, errConnClosed) {
					// shutdown race, don't care.
					return
				}
				panic(fmt.Sprintf("device reconfig failed: %v", err))
			}
		}
	}

	var wg sync.WaitGroup
	wg.Add(len(ms))
	for i := range ms {
		go func(myIdx int) {
			defer wg.Done()

			for {
				select {
				case <-ctx.Done():
					return
				case eps := <-ms[myIdx].epCh:
					logf("conn%d endpoints update", myIdx+1)
					updateEps(myIdx, eps)
				}
			}
		}(i)
	}

	return func() {
		cancel()
		wg.Wait()
	}
}
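
// sketchMeshedPair is a non-authoritative usage sketch (not referenced by any
// test in this file) of how the helpers above compose: two magicStacks share a
// loopback DERP map and are meshed by meshStacks, which acts as a tiny control
// plane pushing netmaps and WireGuard configs as endpoints change. The
// function name is hypothetical.
func sketchMeshedPair(t *testing.T, derpMap *tailcfg.DERPMap) (m1, m2 *magicStack) {
	m1 = newMagicStack(t, t.Logf, localhostListener{}, derpMap)
	t.Cleanup(m1.Close)
	m2 = newMagicStack(t, t.Logf, localhostListener{}, derpMap)
	t.Cleanup(m2.Close)
	// meshStacks returns a cleanup func that stops its update goroutines.
	t.Cleanup(meshStacks(t.Logf, nil, m1, m2))
	return m1, m2
}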

func TestNewConn(t *testing.T) {
	tstest.PanicOnLog()
	tstest.ResourceCheck(t)

	epCh := make(chan string, 16)
	epFunc := func(endpoints []tailcfg.Endpoint) {
		for _, ep := range endpoints {
			epCh <- ep.Addr.String()
		}
	}

	netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: "))
	if err != nil {
		t.Fatalf("netmon.New: %v", err)
	}
	defer netMon.Close()

	stunAddr, stunCleanupFn := stuntest.Serve(t)
	defer stunCleanupFn()

	port := pickPort(t)
	conn, err := NewConn(Options{
		Port:              port,
		DisablePortMapper: true,
		EndpointsFunc:     epFunc,
		Logf:              t.Logf,
		NetMon:            netMon,
		Metrics:           new(usermetric.Registry),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	conn.SetDERPMap(stuntest.DERPMapOf(stunAddr.String()))
	conn.SetPrivateKey(key.NewNode())

	go func() {
		pkts := make([][]byte, 1)
		sizes := make([]int, 1)
		eps := make([]wgconn.Endpoint, 1)
		pkts[0] = make([]byte, 64<<10)
		receiveIPv4 := conn.receiveIPv4()
		for {
			_, err := receiveIPv4(pkts, sizes, eps)
			if err != nil {
				return
			}
		}
	}()

	timeout := time.After(10 * time.Second)
	var endpoints []string
	suffix := fmt.Sprintf(":%d", port)
collectEndpoints:
	for {
		select {
		case ep := <-epCh:
			t.Logf("TestNewConn: got endpoint: %v", ep)
			endpoints = append(endpoints, ep)
			if strings.HasSuffix(ep, suffix) {
				break collectEndpoints
			}
		case <-timeout:
			t.Fatalf("timeout with endpoints: %v", endpoints)
		}
	}
}

func pickPort(t testing.TB) uint16 {
	t.Helper()
	conn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	return uint16(conn.LocalAddr().(*net.UDPAddr).Port)
}

func TestPickDERPFallback(t *testing.T) {
	tstest.PanicOnLog()
	tstest.ResourceCheck(t)

	c := newConn(t.Logf)
	dm := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {},
			2: {},
			3: {},
			4: {},
			5: {},
			6: {},
			7: {},
			8: {},
		},
	}
	c.derpMap = dm
	a := c.pickDERPFallback()
	if a == 0 {
		t.Fatalf("pickDERPFallback returned 0")
	}

	// Test that it's consistent.
	for range 50 {
		b := c.pickDERPFallback()
		if a != b {
			t.Fatalf("got inconsistent %d vs %d values", a, b)
		}
	}

	// Test that the pointer value of c is blended in and
	// distribution over nodes works.
	got := map[int]int{}
	for range 50 {
		c = newConn(t.Logf)
		c.derpMap = dm
		got[c.pickDERPFallback()]++
	}
	t.Logf("distribution: %v", got)
	if len(got) < 2 {
		t.Errorf("expected more than 1 node; got %v", got)
	}

	// Test that stickiness works.
	const someNode = 123456
	c.myDerp = someNode
	if got := c.pickDERPFallback(); got != someNode {
		t.Errorf("not sticky: got %v; want %v", got, someNode)
	}

	// TODO: test that disco-based clients changing to a new DERP
	// region causes this fallback to also move, once disco clients
	// have fixed DERP fallback logic.
}

// TestDeviceStartStop exercises the startup and shutdown logic of
// wireguard-go, which is intimately intertwined with magicsock's own
// lifecycle. We seem to be good at generating deadlocks here, so if
// this test fails you should suspect a deadlock somewhere in startup
// or shutdown. It may be an infrequent flake, so run with
// -count=10000 to be sure.
func TestDeviceStartStop(t *testing.T) {
	tstest.PanicOnLog()
	tstest.ResourceCheck(t)

	netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: "))
	if err != nil {
		t.Fatalf("netmon.New: %v", err)
	}
	defer netMon.Close()

	conn, err := NewConn(Options{
		EndpointsFunc: func(eps []tailcfg.Endpoint) {},
		Logf:          t.Logf,
		NetMon:        netMon,
		Metrics:       new(usermetric.Registry),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	tun := tuntest.NewChannelTUN()
	wgLogger := wglog.NewLogger(t.Logf)
	dev := wgcfg.NewDevice(tun.TUN(), conn.Bind(), wgLogger.DeviceLogger)
	dev.Up()
	dev.Close()
}

// TestConnClosed exercises a code path in sendDiscoMessage taken when the
// connection has been closed.
func TestConnClosed(t *testing.T) {
	mstun := &natlab.Machine{Name: "stun"}
	m1 := &natlab.Machine{Name: "m1"}
	m2 := &natlab.Machine{Name: "m2"}
	inet := natlab.NewInternet()
	sif := mstun.Attach("eth0", inet)
	m1if := m1.Attach("eth0", inet)
	m2if := m2.Attach("eth0", inet)

	d := &devices{
		m1:     m1,
		m1IP:   m1if.V4(),
		m2:     m2,
		m2IP:   m2if.V4(),
		stun:   mstun,
		stunIP: sif.V4(),
	}

	logf, closeLogf := logger.LogfCloser(t.Logf)
	defer closeLogf()

	derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP)
	defer cleanup()

	ms1 := newMagicStack(t, logger.WithPrefix(logf, "conn1: "), d.m1, derpMap)
	defer ms1.Close()
	ms2 := newMagicStack(t, logger.WithPrefix(logf, "conn2: "), d.m2, derpMap)
	defer ms2.Close()

	cleanup = meshStacks(t.Logf, nil, ms1, ms2)
	defer cleanup()

	pkt := tuntest.Ping(ms2.IP(), ms1.IP())

	if len(ms1.conn.activeDerp) == 0 {
		t.Errorf("unexpected DERP empty got: %v want: >0", len(ms1.conn.activeDerp))
	}

	ms1.conn.Close()
	ms2.conn.Close()

	// This should hit a c.closed conditional in sendDiscoMessage() and return immediately.
	ms1.tun.Outbound <- pkt
	select {
	case <-ms2.tun.Inbound:
		t.Error("unexpected response with connection closed")
	case <-time.After(100 * time.Millisecond):
	}

	if len(ms1.conn.activeDerp) > 0 {
		t.Errorf("unexpected DERP active got: %v want:0", len(ms1.conn.activeDerp))
	}
}

// makeNestable returns a logf whose output can be redirected to the currently
// running (sub)test via setT, so log lines from shared goroutines land on the
// right *testing.T.
func makeNestable(t *testing.T) (logf logger.Logf, setT func(t *testing.T)) {
	var mu sync.RWMutex
	cur := t

	setT = func(t *testing.T) {
		mu.Lock()
		cur = t
		mu.Unlock()
	}

	logf = func(s string, args ...any) {
		mu.RLock()
		t := cur
		t.Helper()
		t.Logf(s, args...)
		mu.RUnlock()
	}

	return logf, setT
}
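
// sketchNestedLogf is an illustrative sketch (unused by the tests in this
// file) of the makeNestable pattern: a shared logf keeps working across
// subtests because setT repoints it at whichever *testing.T is currently
// running. The function name is hypothetical.
func sketchNestedLogf(t *testing.T) {
	logf, setT := makeNestable(t)
	t.Run("sub", func(t *testing.T) {
		setT(t) // route subsequent log output to this subtest
		logf("hello from %s", t.Name())
	})
}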

// localhostListener is a nettype.PacketListener that listens on
// localhost (127.0.0.1 or ::1, depending on the requested network)
// when asked to listen on the unspecified address.
//
// It's used in tests where we set up localhost-to-localhost
// communication, because if you listen on the unspecified address on
// macOS and Windows, you get an interactive firewall consent prompt
// to allow the binding, which breaks our CIs.
type localhostListener struct{}

func (localhostListener) ListenPacket(ctx context.Context, network, address string) (net.PacketConn, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return nil, err
	}
	switch network {
	case "udp4":
		switch host {
		case "", "0.0.0.0":
			host = "127.0.0.1"
		case "127.0.0.1":
		default:
			return nil, fmt.Errorf("localhostListener cannot be asked to listen on %q", address)
		}
	case "udp6":
		switch host {
		case "", "::":
			host = "::1"
		case "::1":
		default:
			return nil, fmt.Errorf("localhostListener cannot be asked to listen on %q", address)
		}
	}
	var conf net.ListenConfig
	return conf.ListenPacket(ctx, network, net.JoinHostPort(host, port))
}
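
// sketchLocalhostListen is a usage sketch (an assumption, not exercised by any
// test here): asking localhostListener for the unspecified address yields a
// socket bound to loopback, which is what keeps CI free of firewall prompts.
func sketchLocalhostListen(ctx context.Context) (net.PacketConn, error) {
	// "0.0.0.0:0" is rewritten to "127.0.0.1:0" by ListenPacket above.
	return localhostListener{}.ListenPacket(ctx, "udp4", "0.0.0.0:0")
}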
|
|
|
|
|
2020-03-03 15:39:40 +00:00
|
|
|
func TestTwoDevicePing(t *testing.T) {
|
2024-04-16 16:10:50 +00:00
|
|
|
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762")
|
2021-08-26 06:33:46 +00:00
|
|
|
l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1)
|
2020-07-27 15:21:17 +00:00
|
|
|
n := &devices{
|
|
|
|
m1: l,
|
|
|
|
m1IP: ip,
|
|
|
|
m2: l,
|
|
|
|
m2IP: ip,
|
|
|
|
stun: l,
|
|
|
|
stunIP: ip,
|
|
|
|
}
|
|
|
|
testTwoDevicePing(t, n)
|
|
|
|
}
|
|
|
|
|
2021-10-06 17:18:12 +00:00
|
|
|
func TestDiscokeyChange(t *testing.T) {
|
|
|
|
tstest.PanicOnLog()
|
|
|
|
tstest.ResourceCheck(t)
|
|
|
|
|
|
|
|
derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1))
|
|
|
|
defer cleanup()
|
|
|
|
|
2021-10-28 18:07:25 +00:00
|
|
|
m1Key := key.NewNode()
|
2021-10-06 17:18:12 +00:00
|
|
|
m1 := newMagicStackWithKey(t, t.Logf, localhostListener{}, derpMap, m1Key)
|
|
|
|
defer m1.Close()
|
|
|
|
m2 := newMagicStack(t, t.Logf, localhostListener{}, derpMap)
|
|
|
|
defer m2.Close()
|
|
|
|
|
|
|
|
var (
|
|
|
|
mu sync.Mutex
|
|
|
|
// Start with some random discoKey that isn't actually m1's key,
|
|
|
|
// to simulate m2 coming up with knowledge of an old, expired
|
|
|
|
// discokey. We'll switch to the correct one later in the test.
|
2021-10-29 21:27:29 +00:00
|
|
|
m1DiscoKey = key.NewDisco().Public()
|
2021-10-06 17:18:12 +00:00
|
|
|
)
|
|
|
|
setm1Key := func(idx int, nm *netmap.NetworkMap) {
|
|
|
|
if idx != 1 {
|
|
|
|
// only mutate m2's netmap
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(nm.Peers) != 1 {
|
|
|
|
// m1 not in netmap yet.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
mu.Lock()
|
|
|
|
defer mu.Unlock()
|
2023-08-18 14:57:44 +00:00
|
|
|
mut := nm.Peers[0].AsStruct()
|
|
|
|
mut.DiscoKey = m1DiscoKey
|
|
|
|
nm.Peers[0] = mut.View()
|
2021-10-06 17:18:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cleanupMesh := meshStacks(t.Logf, setm1Key, m1, m2)
|
|
|
|
defer cleanupMesh()
|
|
|
|
|
|
|
|
// Wait for both peers to know about each other.
|
|
|
|
for {
|
|
|
|
if s1 := m1.Status(); len(s1.Peer) != 1 {
|
|
|
|
time.Sleep(10 * time.Millisecond)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if s2 := m2.Status(); len(s2.Peer) != 1 {
|
|
|
|
time.Sleep(10 * time.Millisecond)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
mu.Lock()
|
|
|
|
m1DiscoKey = m1.conn.DiscoPublicKey()
|
|
|
|
mu.Unlock()
|
|
|
|
|
|
|
|
// Manually trigger an endpoint update to meshStacks, so it hands
|
|
|
|
// m2 a new netmap.
|
|
|
|
m1.conn.mu.Lock()
|
|
|
|
m1.epCh <- m1.conn.lastEndpoints
|
|
|
|
m1.conn.mu.Unlock()
|
|
|
|
|
|
|
|
cleanup = newPinger(t, t.Logf, m1, m2)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
mustDirect(t, t.Logf, m1, m2)
|
|
|
|
mustDirect(t, t.Logf, m2, m1)
|
|
|
|
}
|
|
|
|
|
2020-07-27 15:21:17 +00:00
|
|
|
func TestActiveDiscovery(t *testing.T) {
|
2023-10-07 01:54:59 +00:00
|
|
|
tstest.ResourceCheck(t)
|
|
|
|
|
2020-07-27 15:21:17 +00:00
|
|
|
t.Run("simple_internet", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
mstun := &natlab.Machine{Name: "stun"}
|
|
|
|
m1 := &natlab.Machine{Name: "m1"}
|
|
|
|
m2 := &natlab.Machine{Name: "m2"}
|
|
|
|
inet := natlab.NewInternet()
|
|
|
|
sif := mstun.Attach("eth0", inet)
|
|
|
|
m1if := m1.Attach("eth0", inet)
|
|
|
|
m2if := m2.Attach("eth0", inet)
|
|
|
|
|
2020-07-11 06:48:08 +00:00
|
|
|
n := &devices{
|
2020-07-27 15:21:17 +00:00
|
|
|
m1: m1,
|
|
|
|
m1IP: m1if.V4(),
|
|
|
|
m2: m2,
|
|
|
|
m2IP: m2if.V4(),
|
|
|
|
stun: mstun,
|
|
|
|
stunIP: sif.V4(),
|
2020-07-11 06:48:08 +00:00
|
|
|
}
|
2020-07-27 15:21:17 +00:00
|
|
|
testActiveDiscovery(t, n)
|
2020-07-10 21:26:04 +00:00
|
|
|
})
|
2020-07-11 07:03:19 +00:00
|
|
|
|
2020-07-27 20:34:41 +00:00
|
|
|
t.Run("facing_easy_firewalls", func(t *testing.T) {
|
2020-07-27 15:21:17 +00:00
|
|
|
mstun := &natlab.Machine{Name: "stun"}
|
|
|
|
m1 := &natlab.Machine{
|
|
|
|
Name: "m1",
|
|
|
|
PacketHandler: &natlab.Firewall{},
|
|
|
|
}
|
|
|
|
m2 := &natlab.Machine{
|
|
|
|
Name: "m2",
|
|
|
|
PacketHandler: &natlab.Firewall{},
|
|
|
|
}
|
|
|
|
inet := natlab.NewInternet()
|
|
|
|
sif := mstun.Attach("eth0", inet)
|
|
|
|
m1if := m1.Attach("eth0", inet)
|
|
|
|
m2if := m2.Attach("eth0", inet)
|
|
|
|
|
|
|
|
n := &devices{
|
|
|
|
m1: m1,
|
|
|
|
m1IP: m1if.V4(),
|
|
|
|
m2: m2,
|
|
|
|
m2IP: m2if.V4(),
|
|
|
|
stun: mstun,
|
|
|
|
stunIP: sif.V4(),
|
|
|
|
}
|
|
|
|
testActiveDiscovery(t, n)
|
2020-07-10 21:26:04 +00:00
|
|
|
})
|
2020-07-27 15:21:17 +00:00
|
|
|
|
|
|
|
t.Run("facing_nats", func(t *testing.T) {
|
|
|
|
mstun := &natlab.Machine{Name: "stun"}
|
|
|
|
m1 := &natlab.Machine{
|
|
|
|
Name: "m1",
|
|
|
|
PacketHandler: &natlab.Firewall{},
|
|
|
|
}
|
|
|
|
nat1 := &natlab.Machine{
|
|
|
|
Name: "nat1",
|
|
|
|
}
|
|
|
|
m2 := &natlab.Machine{
|
|
|
|
Name: "m2",
|
|
|
|
PacketHandler: &natlab.Firewall{},
|
|
|
|
}
|
|
|
|
nat2 := &natlab.Machine{
|
|
|
|
Name: "nat2",
|
|
|
|
}
|
|
|
|
|
|
|
|
inet := natlab.NewInternet()
|
|
|
|
lan1 := &natlab.Network{
|
|
|
|
Name: "lan1",
|
2022-07-26 03:55:44 +00:00
|
|
|
Prefix4: netip.MustParsePrefix("192.168.0.0/24"),
|
2020-07-27 15:21:17 +00:00
|
|
|
}
|
|
|
|
lan2 := &natlab.Network{
|
|
|
|
Name: "lan2",
|
2022-07-26 03:55:44 +00:00
|
|
|
Prefix4: netip.MustParsePrefix("192.168.1.0/24"),
|
2020-07-27 15:21:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
sif := mstun.Attach("eth0", inet)
|
|
|
|
nat1WAN := nat1.Attach("wan", inet)
|
|
|
|
nat1LAN := nat1.Attach("lan1", lan1)
|
|
|
|
nat2WAN := nat2.Attach("wan", inet)
|
|
|
|
nat2LAN := nat2.Attach("lan2", lan2)
|
|
|
|
m1if := m1.Attach("eth0", lan1)
|
|
|
|
m2if := m2.Attach("eth0", lan2)
|
|
|
|
lan1.SetDefaultGateway(nat1LAN)
|
|
|
|
lan2.SetDefaultGateway(nat2LAN)
|
|
|
|
|
|
|
|
nat1.PacketHandler = &natlab.SNAT44{
|
|
|
|
Machine: nat1,
|
|
|
|
ExternalInterface: nat1WAN,
|
|
|
|
Firewall: &natlab.Firewall{
|
|
|
|
TrustedInterface: nat1LAN,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
nat2.PacketHandler = &natlab.SNAT44{
|
|
|
|
Machine: nat2,
|
|
|
|
ExternalInterface: nat2WAN,
|
|
|
|
Firewall: &natlab.Firewall{
|
|
|
|
TrustedInterface: nat2LAN,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
n := &devices{
|
|
|
|
m1: m1,
|
|
|
|
m1IP: m1if.V4(),
|
|
|
|
m2: m2,
|
|
|
|
m2IP: m2if.V4(),
|
|
|
|
stun: mstun,
|
|
|
|
stunIP: sif.V4(),
|
|
|
|
}
|
|
|
|
testActiveDiscovery(t, n)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-07-11 06:48:08 +00:00
|
|
|
type devices struct {
|
|
|
|
m1 nettype.PacketListener
|
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-26 04:14:09 +00:00
|
|
|
m1IP netip.Addr
|
2020-07-11 06:48:08 +00:00
|
|
|
|
|
|
|
m2 nettype.PacketListener
|
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-26 04:14:09 +00:00
|
|
|
m2IP netip.Addr
|
2020-07-11 06:48:08 +00:00
|
|
|
|
|
|
|
stun nettype.PacketListener
|
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-26 04:14:09 +00:00
|
|
|
stunIP netip.Addr
|
2020-07-11 06:48:08 +00:00
|
|
|
}
|
|
|
|
|
2020-07-27 15:09:54 +00:00
|
|
|
// newPinger starts continuously sending test packets from src to
// dst, until cleanup is invoked to stop it. Each ping must transit
// the network within a generous timeout; it is a test failure to
// lose a ping.
func newPinger(t *testing.T, logf logger.Logf, src, dst *magicStack) (cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	one := func() bool {
		// TODO(danderson): requiring exactly zero packet loss
		// will probably be too strict for some tests we'd like to
		// run (e.g. discovery switching to a new path on
		// failure). Figure out what kind of thing would be
		// acceptable to test instead of "every ping must
		// transit".
		pkt := tuntest.Ping(dst.IP(), src.IP())
		select {
		case src.tun.Outbound <- pkt:
		case <-ctx.Done():
			return false
		}
		select {
		case <-dst.tun.Inbound:
			return true
		case <-time.After(10 * time.Second):
			// Very generous timeout here because depending on
			// magicsock setup races, the first handshake might get
			// eaten by the receiving end (if wireguard-go hasn't been
			// configured quite yet), so we have to wait for at least
			// the first retransmit from wireguard before we declare
			// failure.
			t.Errorf("timed out waiting for ping to transit")
			return true
		case <-ctx.Done():
			// Try a little bit longer to consume the packet we're
			// waiting for. This is to deal with shutdown races, where
			// natlab may still be delivering a packet to us from a
			// goroutine.
			select {
			case <-dst.tun.Inbound:
			case <-time.After(time.Second):
			}
			return false
		}
	}

	cleanup = func() {
		cancel()
		<-done
	}

	// Synchronously transit one ping to get things started. This is
	// nice because it means that newPinger returning means we've
	// worked through initial connectivity.
	if !one() {
		cleanup()
		return
	}

	go func() {
		logf("sending ping stream from %s (%s) to %s (%s)", src, src.IP(), dst, dst.IP())
		defer close(done)
		for one() {
		}
	}()

	return cleanup
}

// testActiveDiscovery verifies that two magicStacks tied to the given
// devices can establish a direct p2p connection with each other. See
// TestActiveDiscovery for the various configurations of devices that
// get exercised.
func testActiveDiscovery(t *testing.T, d *devices) {
	tstest.PanicOnLog()

	tlogf, setT := makeNestable(t)
	setT(t)

	start := time.Now()
	wlogf := func(msg string, args ...any) {
		t.Helper()
		msg = fmt.Sprintf("%s: %s", time.Since(start).Truncate(time.Microsecond), msg)
		tlogf(msg, args...)
	}
	logf, closeLogf := logger.LogfCloser(wlogf)
	defer closeLogf()

	derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP)
	defer cleanup()

	m1 := newMagicStack(t, logger.WithPrefix(logf, "conn1: "), d.m1, derpMap)
	defer m1.Close()
	m2 := newMagicStack(t, logger.WithPrefix(logf, "conn2: "), d.m2, derpMap)
	defer m2.Close()

	cleanup = meshStacks(logf, nil, m1, m2)
	defer cleanup()

	m1IP := m1.IP()
	m2IP := m2.IP()
	logf("IPs: %s %s", m1IP, m2IP)

	cleanup = newPinger(t, logf, m1, m2)
	defer cleanup()

	// Everything is now up and running, active discovery should find
	// a direct path between our peers. Wait for it to switch away
	// from DERP.
	mustDirect(t, logf, m1, m2)
	mustDirect(t, logf, m2, m1)

	logf("starting cleanup")
}

func mustDirect(t *testing.T, logf logger.Logf, m1, m2 *magicStack) {
	lastLog := time.Now().Add(-time.Minute)
	// See https://github.com/tailscale/tailscale/issues/654
	// and https://github.com/tailscale/tailscale/issues/3247 for discussions of this deadline.
	for deadline := time.Now().Add(30 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
		pst := m1.Status().Peer[m2.Public()]
		if pst.CurAddr != "" {
			logf("direct link %s->%s found with addr %s", m1, m2, pst.CurAddr)
			return
		}
		if now := time.Now(); now.Sub(lastLog) > time.Second {
			logf("no direct path %s->%s yet, addrs %v", m1, m2, pst.Addrs)
			lastLog = now
		}
	}
	t.Errorf("magicsock did not find a direct path from %s to %s", m1, m2)
}

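// testTwoDevicePing sets up two magicStacks on the given devices,
// configures them as WireGuard peers of each other, and verifies that
// pings transit in both directions (including via SendPacket and after
// a no-op reconfig), cross-checking connection stats and user metrics.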
func testTwoDevicePing(t *testing.T, d *devices) {
	tstest.PanicOnLog()
	tstest.ResourceCheck(t)

	// This gets reassigned inside every test, so that the connections
	// all log using the "current" t.Logf function. Sigh.
	nestedLogf, setT := makeNestable(t)

	logf, closeLogf := logger.LogfCloser(nestedLogf)
	defer closeLogf()

	derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP)
	defer cleanup()

	m1 := newMagicStack(t, logf, d.m1, derpMap)
	defer m1.Close()
	m2 := newMagicStack(t, logf, d.m2, derpMap)
	defer m2.Close()

	cleanupMesh := meshStacks(logf, nil, m1, m2)
	defer cleanupMesh()

	// Wait for magicsock to be told about peers from meshStacks.
	tstest.WaitFor(10*time.Second, func() error {
		if p := m1.Status().Peer[m2.Public()]; p == nil || !p.InMagicSock {
			return errors.New("m1 not ready")
		}
		if p := m2.Status().Peer[m1.Public()]; p == nil || !p.InMagicSock {
			return errors.New("m2 not ready")
		}
		return nil
	})

	m1cfg := &wgcfg.Config{
		Name:       "peer1",
		PrivateKey: m1.privateKey,
		Addresses:  []netip.Prefix{netip.MustParsePrefix("1.0.0.1/32")},
		Peers: []wgcfg.Peer{
			{
				PublicKey:  m2.privateKey.Public(),
				DiscoKey:   m2.conn.DiscoPublicKey(),
				AllowedIPs: []netip.Prefix{netip.MustParsePrefix("1.0.0.2/32")},
			},
		},
	}
	m2cfg := &wgcfg.Config{
		Name:       "peer2",
		PrivateKey: m2.privateKey,
		Addresses:  []netip.Prefix{netip.MustParsePrefix("1.0.0.2/32")},
		Peers: []wgcfg.Peer{
			{
				PublicKey:  m1.privateKey.Public(),
				DiscoKey:   m1.conn.DiscoPublicKey(),
				AllowedIPs: []netip.Prefix{netip.MustParsePrefix("1.0.0.1/32")},
			},
		},
	}

	if err := m1.Reconfig(m1cfg); err != nil {
		t.Fatal(err)
	}
	if err := m2.Reconfig(m2cfg); err != nil {
		t.Fatal(err)
	}

	// In the normal case, pings succeed immediately.
	// However, in the case of a handshake race, we need to retry.
	// With very bad luck, we can need to retry multiple times.
	allowedRetries := 3
	if cibuild.On() {
		// Allow extra retries on small/flaky/loaded CI machines.
		allowedRetries *= 2
	}
	// Retries take 5s each. Add 1s for some processing time.
	pingTimeout := 5*time.Second*time.Duration(allowedRetries) + time.Second

	// sendWithTimeout sends msg using send, checking that it is received unchanged from in.
	// It resends once per second until the send succeeds, or pingTimeout time has elapsed.
	sendWithTimeout := func(msg []byte, in chan []byte, send func()) error {
		start := time.Now()
		for time.Since(start) < pingTimeout {
			send()
			select {
			case recv := <-in:
				if !bytes.Equal(msg, recv) {
					return errors.New("ping did not transit correctly")
				}
				return nil
			case <-time.After(time.Second):
				// try again
			}
		}
		return errors.New("ping timed out")
	}

	ping1 := func(t *testing.T) {
		msg2to1 := tuntest.Ping(netip.MustParseAddr("1.0.0.1"), netip.MustParseAddr("1.0.0.2"))
		send := func() {
			m2.tun.Outbound <- msg2to1
			t.Log("ping1 sent")
		}
		in := m1.tun.Inbound
		if err := sendWithTimeout(msg2to1, in, send); err != nil {
			t.Error(err)
		}
	}
	ping2 := func(t *testing.T) {
		msg1to2 := tuntest.Ping(netip.MustParseAddr("1.0.0.2"), netip.MustParseAddr("1.0.0.1"))
		send := func() {
			m1.tun.Outbound <- msg1to2
			t.Log("ping2 sent")
		}
		in := m2.tun.Inbound
		if err := sendWithTimeout(msg1to2, in, send); err != nil {
			t.Error(err)
		}
	}

	m1.stats = connstats.NewStatistics(0, 0, nil)
	defer m1.stats.Shutdown(context.Background())
	m1.conn.SetStatistics(m1.stats)
	m2.stats = connstats.NewStatistics(0, 0, nil)
	defer m2.stats.Shutdown(context.Background())
	m2.conn.SetStatistics(m2.stats)

	checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) {
		_, stats := m.stats.TestExtract()
		for _, conn := range wantConns {
			if _, ok := stats[conn]; ok {
				return
			}
		}
		t.Helper()
		t.Errorf("missing any connection to %s from %s", wantConns, xmaps.Keys(stats))
	}

	addrPort := netip.MustParseAddrPort
	m1Conns := []netlogtype.Connection{
		{Src: addrPort("1.0.0.2:0"), Dst: m2.conn.pconn4.LocalAddr().AddrPort()},
		{Src: addrPort("1.0.0.2:0"), Dst: addrPort("127.3.3.40:1")},
	}
	m2Conns := []netlogtype.Connection{
		{Src: addrPort("1.0.0.1:0"), Dst: m1.conn.pconn4.LocalAddr().AddrPort()},
		{Src: addrPort("1.0.0.1:0"), Dst: addrPort("127.3.3.40:1")},
	}

	outerT := t
	t.Run("ping 1.0.0.1", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		ping1(t)
		checkStats(t, m1, m1Conns)
		checkStats(t, m2, m2Conns)
	})

	t.Run("ping 1.0.0.2", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		ping2(t)
		checkStats(t, m1, m1Conns)
		checkStats(t, m2, m2Conns)
	})

	t.Run("ping 1.0.0.2 via SendPacket", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		msg1to2 := tuntest.Ping(netip.MustParseAddr("1.0.0.2"), netip.MustParseAddr("1.0.0.1"))
		send := func() {
			if err := m1.tsTun.InjectOutbound(msg1to2); err != nil {
				t.Fatal(err)
			}
			t.Log("SendPacket sent")
		}
		in := m2.tun.Inbound
		if err := sendWithTimeout(msg1to2, in, send); err != nil {
			t.Error(err)
		}
		checkStats(t, m1, m1Conns)
		checkStats(t, m2, m2Conns)
	})

	t.Run("no-op dev1 reconfig", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		if err := m1.Reconfig(m1cfg); err != nil {
			t.Fatal(err)
		}
		ping1(t)
		ping2(t)
		checkStats(t, m1, m1Conns)
		checkStats(t, m2, m2Conns)
	})

	t.Run("compare-metrics-stats", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		m1.conn.resetMetricsForTest()
		m1.stats.TestExtract()
		m2.conn.resetMetricsForTest()
		m2.stats.TestExtract()
		t.Logf("Metrics before: %s\n", m1.metrics.String())
		ping1(t)
		ping2(t)
		assertConnStatsAndUserMetricsEqual(t, m1)
		assertConnStatsAndUserMetricsEqual(t, m2)
		t.Logf("Metrics after: %s\n", m1.metrics.String())
	})
}

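// resetMetricsForTest zeroes the Conn's per-path (IPv4, IPv6, DERP)
// inbound and outbound byte and packet counters so a test can assert
// on the deltas produced by its own traffic.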
func (c *Conn) resetMetricsForTest() {
	c.metrics.inboundBytesIPv4Total.Set(0)
	c.metrics.inboundPacketsIPv4Total.Set(0)
	c.metrics.outboundBytesIPv4Total.Set(0)
	c.metrics.outboundPacketsIPv4Total.Set(0)
	c.metrics.inboundBytesIPv6Total.Set(0)
	c.metrics.inboundPacketsIPv6Total.Set(0)
	c.metrics.outboundBytesIPv6Total.Set(0)
	c.metrics.outboundPacketsIPv6Total.Set(0)
	c.metrics.inboundBytesDERPTotal.Set(0)
	c.metrics.inboundPacketsDERPTotal.Set(0)
	c.metrics.outboundBytesDERPTotal.Set(0)
	c.metrics.outboundPacketsDERPTotal.Set(0)
}

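// assertConnStatsAndUserMetricsEqual checks that the physical-connection
// counters collected by connstats agree with the Conn's user metrics,
// bucketing traffic into DERP (dst 127.3.3.40:1) versus direct IPv4.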
func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) {
	_, phys := ms.stats.TestExtract()

	physIPv4RxBytes := int64(0)
	physIPv4TxBytes := int64(0)
	physDERPRxBytes := int64(0)
	physDERPTxBytes := int64(0)
	physIPv4RxPackets := int64(0)
	physIPv4TxPackets := int64(0)
	physDERPRxPackets := int64(0)
	physDERPTxPackets := int64(0)
	for conn, count := range phys {
		t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String())
		if conn.Dst.String() == "127.3.3.40:1" {
			physDERPRxBytes += int64(count.RxBytes)
			physDERPTxBytes += int64(count.TxBytes)
			physDERPRxPackets += int64(count.RxPackets)
			physDERPTxPackets += int64(count.TxPackets)
		} else {
			physIPv4RxBytes += int64(count.RxBytes)
			physIPv4TxBytes += int64(count.TxBytes)
			physIPv4RxPackets += int64(count.RxPackets)
			physIPv4TxPackets += int64(count.TxPackets)
		}
	}

	metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value()
	metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value()
	metricIPv4TxBytes := ms.conn.metrics.outboundBytesIPv4Total.Value()
	metricIPv4TxPackets := ms.conn.metrics.outboundPacketsIPv4Total.Value()

	metricDERPRxBytes := ms.conn.metrics.inboundBytesDERPTotal.Value()
	metricDERPRxPackets := ms.conn.metrics.inboundPacketsDERPTotal.Value()
	metricDERPTxBytes := ms.conn.metrics.outboundBytesDERPTotal.Value()
	metricDERPTxPackets := ms.conn.metrics.outboundPacketsDERPTotal.Value()

	c := qt.New(t)
	c.Assert(physDERPRxBytes, qt.Equals, metricDERPRxBytes)
	c.Assert(physDERPTxBytes, qt.Equals, metricDERPTxBytes)
	c.Assert(physIPv4RxBytes, qt.Equals, metricIPv4RxBytes)
	c.Assert(physIPv4TxBytes, qt.Equals, metricIPv4TxBytes)
	c.Assert(physDERPRxPackets, qt.Equals, metricDERPRxPackets)
	c.Assert(physDERPTxPackets, qt.Equals, metricDERPTxPackets)
	c.Assert(physIPv4RxPackets, qt.Equals, metricIPv4RxPackets)
	c.Assert(physIPv4TxPackets, qt.Equals, metricIPv4TxPackets)

	// Validate that the usermetrics and clientmetrics are in sync.
	// Note: the clientmetrics are global, so when they register with the
	// wgengine, the multiple in-process nodes used by this test all update
	// the same counters. That is why the metrics must be multiplied by 2
	// to get the expected value.
	// TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420
	c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2)
	c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2)
	c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2)
}

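// TestDiscoMessage verifies that a disco packet sealed with a known
// peer's disco key is recognized and accepted by handleDiscoMessage.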
func TestDiscoMessage(t *testing.T) {
	c := newConn(t.Logf)
	c.privateKey = key.NewNode()

	peer1Pub := c.DiscoPublicKey()
	peer1Priv := c.discoPrivate
	n := &tailcfg.Node{
		Key:      key.NewNode().Public(),
		DiscoKey: peer1Pub,
	}
	ep := &endpoint{
		nodeID:    1,
		publicKey: n.Key,
	}
	ep.disco.Store(&endpointDisco{
		key:   n.DiscoKey,
		short: n.DiscoKey.ShortString(),
	})
	c.peerMap.upsertEndpoint(ep, key.DiscoPublic{})

	const payload = "why hello"

	var nonce [24]byte
	crand.Read(nonce[:])

	pkt := peer1Pub.AppendTo([]byte("TS💬"))

	box := peer1Priv.Shared(c.discoPrivate.Public()).Seal([]byte(payload))
	pkt = append(pkt, box...)
	got := c.handleDiscoMessage(pkt, netip.AddrPort{}, key.NodePublic{}, discoRXPathUDP)
	if !got {
		t.Error("failed to open it")
	}
}

// TestDiscoStringLogRace tests that having an endpoint.String method
// prevents wireguard-go's log.Printf("%v") of its conn.Endpoint values
// from using reflection to walk into the endpoint's mutex while it is
// in use, which would cause data races.
func TestDiscoStringLogRace(t *testing.T) {
	de := new(endpoint)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		fmt.Fprintf(io.Discard, "%v", de)
	}()
	go func() {
		defer wg.Done()
		de.mu.Lock()
	}()
	wg.Wait()
}

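// Test32bitAlignment verifies that atomically accessed fields of
// endpoint are 8-byte aligned, as required on 32-bit platforms, and
// that noteRecvActivity only triggers the conn's callback on the
// first call.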
func Test32bitAlignment(t *testing.T) {
	// Need an associated conn with non-nil noteRecvActivity to
	// trigger interesting work on the atomics in endpoint.
	called := 0
	de := endpoint{
		c: &Conn{
			noteRecvActivity: func(key.NodePublic) { called++ },
		},
	}

	if off := unsafe.Offsetof(de.lastRecvWG); off%8 != 0 {
		t.Fatalf("endpoint.lastRecvWG is not 8-byte aligned")
	}

	de.noteRecvActivity(netip.AddrPort{}, mono.Now()) // verify this doesn't panic on 32-bit
	if called != 1 {
		t.Fatal("expected call to noteRecvActivity")
	}
	de.noteRecvActivity(netip.AddrPort{}, mono.Now())
	if called != 1 {
		t.Error("expected no second call to noteRecvActivity")
	}
}

// newTestConn returns a new Conn.
func newTestConn(t testing.TB) *Conn {
	t.Helper()
	port := pickPort(t)

	netMon, err := netmon.New(logger.WithPrefix(t.Logf, "... netmon: "))
	if err != nil {
		t.Fatalf("netmon.New: %v", err)
	}
	t.Cleanup(func() { netMon.Close() })

	conn, err := NewConn(Options{
		NetMon:                 netMon,
		HealthTracker:          new(health.Tracker),
		Metrics:                new(usermetric.Registry),
		DisablePortMapper:      true,
		Logf:                   t.Logf,
		Port:                   port,
		TestOnlyPacketListener: localhostListener{},
		EndpointsFunc: func(eps []tailcfg.Endpoint) {
			t.Logf("endpoints: %q", eps)
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	return conn
}

// addTestEndpoint sets conn's network map to a single peer expected
// to receive packets from sendConn (or DERP), and returns that peer's
// nodekey and discokey.
func addTestEndpoint(tb testing.TB, conn *Conn, sendConn net.PacketConn) (key.NodePublic, key.DiscoPublic) {
	// Give conn just enough state that it'll recognize sendConn as a
	// valid peer and not fall through to the legacy magicsock
	// codepath.
	discoKey := key.DiscoPublicFromRaw32(mem.B([]byte{31: 1}))
	nodeKey := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 31: 0}))
	conn.SetNetworkMap(&netmap.NetworkMap{
		Peers: nodeViews([]*tailcfg.Node{
			{
				ID:        1,
				Key:       nodeKey,
				DiscoKey:  discoKey,
				Endpoints: eps(sendConn.LocalAddr().String()),
			},
		}),
	})
	conn.SetPrivateKey(key.NodePrivateFromRaw32(mem.B([]byte{0: 1, 31: 0})))
	_, err := conn.ParseEndpoint(nodeKey.UntypedHexString())
	if err != nil {
		tb.Fatal(err)
	}
	conn.addValidDiscoPathForTest(nodeKey, netip.MustParseAddrPort(sendConn.LocalAddr().String()))
	return nodeKey, discoKey
}

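// setUpReceiveFrom creates a test Conn with a single test peer and a
// local UDP sender, and returns a function that performs one
// send-and-receive round trip through the Conn's IPv4 receive path.
// It is shared by TestReceiveFromAllocs and BenchmarkReceiveFrom.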
func setUpReceiveFrom(tb testing.TB) (roundTrip func()) {
	if b, ok := tb.(*testing.B); ok {
		b.ReportAllocs()
	}

	conn := newTestConn(tb)
	tb.Cleanup(func() { conn.Close() })
	conn.logf = logger.Discard

	sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		tb.Fatal(err)
	}
	tb.Cleanup(func() { sendConn.Close() })

	addTestEndpoint(tb, conn, sendConn)

	var dstAddr net.Addr = conn.pconn4.LocalAddr()
	sendBuf := make([]byte, 1<<10)
	for i := range sendBuf {
		sendBuf[i] = 'x'
	}
	buffs := make([][]byte, 1)
	buffs[0] = make([]byte, 2<<10)
	sizes := make([]int, 1)
	eps := make([]wgconn.Endpoint, 1)
	receiveIPv4 := conn.receiveIPv4()
	return func() {
		if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil {
			tb.Fatalf("WriteTo: %v", err)
		}
		n, err := receiveIPv4(buffs, sizes, eps)
		if err != nil {
			tb.Fatal(err)
		}
		_ = n
		_ = eps
	}
}

// goMajorVersion reports the major Go version and whether it is a Tailscale fork.
// If parsing fails, goMajorVersion returns 0, false.
func goMajorVersion(s string) (version int, isTS bool) {
	if !strings.HasPrefix(s, "go1.") {
		return 0, false
	}
	mm := s[len("go1."):]
	var major, rest string
	for _, sep := range []string{".", "rc", "beta", "-"} {
		i := strings.Index(mm, sep)
		if i > 0 {
			major, rest = mm[:i], mm[i:]
			break
		}
	}
	if major == "" {
		major = mm
	}
	n, err := strconv.Atoi(major)
	if err != nil {
		return 0, false
	}
	return n, strings.Contains(rest, "ts")
}

func TestGoMajorVersion(t *testing.T) {
	tests := []struct {
		version string
		wantN   int
		wantTS  bool
	}{
		{"go1.15.8", 15, false},
		{"go1.16rc1", 16, false},
		{"go1.16rc1", 16, false},
		{"go1.15.5-ts3bd89195a3", 15, true},
		{"go1.15", 15, false},
		{"go1.18-ts0d07ed810a", 18, true},
	}

	for _, tt := range tests {
		n, ts := goMajorVersion(tt.version)
		if tt.wantN != n || tt.wantTS != ts {
			t.Errorf("goMajorVersion(%s) = %v, %v, want %v, %v", tt.version, n, ts, tt.wantN, tt.wantTS)
		}
	}

	// Ensure that the current Go version is parseable.
	n, _ := goMajorVersion(runtime.Version())
	if n == 0 {
		t.Fatalf("unable to parse %v", runtime.Version())
	}
}

func TestReceiveFromAllocs(t *testing.T) {
	// TODO(jwhited): we are back to nonzero alloc due to our use of x/net until
	// https://github.com/golang/go/issues/45886 is implemented.
	t.Skip("alloc tests are skipped until https://github.com/golang/go/issues/45886 is implemented and plumbed.")
	if racebuild.On {
		t.Skip("alloc tests are unreliable with -race")
	}
	// Go 1.16 and before: allow 3 allocs.
	// Go 1.17: allow 2 allocs.
	// Go 1.17, Tailscale fork: allow 1 alloc.
	// Go 1.18+: allow 0 allocs.
	// Go 2.0: allow -1 allocs (projected).
	major, ts := goMajorVersion(runtime.Version())
	maxAllocs := 3
	switch {
	case major == 17 && !ts:
		maxAllocs = 2
	case major == 17 && ts:
		maxAllocs = 1
	case major >= 18:
		maxAllocs = 0
	}
	t.Logf("allowing %d allocs for Go version %q", maxAllocs, runtime.Version())
	roundTrip := setUpReceiveFrom(t)
	err := tstest.MinAllocsPerRun(t, uint64(maxAllocs), roundTrip)
	if err != nil {
		t.Fatal(err)
	}
}

func BenchmarkReceiveFrom(b *testing.B) {
	roundTrip := setUpReceiveFrom(b)
	for range b.N {
		roundTrip()
	}
}

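// BenchmarkReceiveFrom_Native measures a plain UDP send/receive round
// trip with no magicsock in the path, as a baseline for
// BenchmarkReceiveFrom.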
func BenchmarkReceiveFrom_Native(b *testing.B) {
	b.ReportAllocs()
	recvConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		b.Fatal(err)
	}
	defer recvConn.Close()
	recvConnUDP := recvConn.(*net.UDPConn)

	sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		b.Fatal(err)
	}
	defer sendConn.Close()

	var dstAddr net.Addr = recvConn.LocalAddr()
	sendBuf := make([]byte, 1<<10)
	for i := range sendBuf {
		sendBuf[i] = 'x'
	}

	buf := make([]byte, 2<<10)
	for range b.N {
		if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil {
			b.Fatalf("WriteTo: %v", err)
		}
		if _, _, err := recvConnUDP.ReadFromUDP(buf); err != nil {
			b.Fatalf("ReadFromUDP: %v", err)
		}
	}
}

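// nodeViews converts a slice of *tailcfg.Node into the corresponding
// read-only tailcfg.NodeView values.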
func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView {
	nv := make([]tailcfg.NodeView, len(v))
	for i, n := range v {
		nv[i] = n.View()
	}
	return nv
}

// Test that a netmap update where a node changes its node key but
// doesn't change its disco key doesn't result in a broken state.
//
// https://github.com/tailscale/tailscale/issues/1391
func TestSetNetworkMapChangingNodeKey(t *testing.T) {
	conn := newTestConn(t)
	t.Cleanup(func() { conn.Close() })
	var buf tstest.MemLogger
	conn.logf = buf.Logf

	conn.SetPrivateKey(key.NodePrivateFromRaw32(mem.B([]byte{0: 1, 31: 0})))

	discoKey := key.DiscoPublicFromRaw32(mem.B([]byte{31: 1}))
	nodeKey1 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '1', 31: 0}))
	nodeKey2 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '2', 31: 0}))

	conn.SetNetworkMap(&netmap.NetworkMap{
		Peers: nodeViews([]*tailcfg.Node{
			{
				ID:        1,
				Key:       nodeKey1,
				DiscoKey:  discoKey,
				Endpoints: eps("192.168.1.2:345"),
			},
		}),
	})
	_, err := conn.ParseEndpoint(nodeKey1.UntypedHexString())
	if err != nil {
		t.Fatal(err)
	}

	for range 3 {
		conn.SetNetworkMap(&netmap.NetworkMap{
			Peers: nodeViews([]*tailcfg.Node{
				{
					ID:        2,
					Key:       nodeKey2,
					DiscoKey:  discoKey,
					Endpoints: eps("192.168.1.2:345"),
				},
			}),
		})
	}

	de, ok := conn.peerMap.endpointForNodeKey(nodeKey2)
	if ok && de.publicKey != nodeKey2 {
		t.Fatalf("discoEndpoint public key = %q; want %q", de.publicKey, nodeKey2)
	}
	deDisco := de.disco.Load()
	if deDisco == nil {
		t.Fatalf("discoEndpoint disco is nil")
	}
	if deDisco.key != discoKey {
		t.Errorf("discoKey = %v; want %v", deDisco.key, discoKey)
	}
	if _, ok := conn.peerMap.endpointForNodeKey(nodeKey1); ok {
		t.Errorf("didn't expect to find node for key1")
	}

	log := buf.String()
	wantSub := map[string]int{
		"magicsock: got updated network map; 1 peers": 2,
	}
	for sub, want := range wantSub {
		got := strings.Count(log, sub)
		if got != want {
			t.Errorf("in log, count of substring %q = %v; want %v", sub, got, want)
		}
	}
	if t.Failed() {
		t.Logf("log output: %s", log)
	}
}

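// TestRebindStress hammers Conn.Rebind from two goroutines while a
// receive loop is running, to shake out races between rebinding the
// sockets and the IPv4 receive path.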
func TestRebindStress(t *testing.T) {
	conn := newTestConn(t)

	var buf tstest.MemLogger
	conn.logf = buf.Logf

	closed := false
	t.Cleanup(func() {
		if !closed {
			conn.Close()
		}
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	errc := make(chan error, 1)
	go func() {
		buffs := make([][]byte, 1)
		sizes := make([]int, 1)
		eps := make([]wgconn.Endpoint, 1)
		buffs[0] = make([]byte, 1500)
		receiveIPv4 := conn.receiveIPv4()
		for {
			_, err := receiveIPv4(buffs, sizes, eps)
			if ctx.Err() != nil {
				errc <- nil
				return
			}
			if err != nil {
				errc <- err
				return
			}
		}
	}()

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for range 2000 {
			conn.Rebind()
		}
	}()
	go func() {
		defer wg.Done()
		for range 2000 {
			conn.Rebind()
		}
	}()
	wg.Wait()

	cancel()
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}
	closed = true

	err := <-errc
	if err != nil {
		t.Fatalf("Got ReceiveIPv4 error: %v (is closed = %v). Log:\n%s", err, errors.Is(err, net.ErrClosed), buf.String())
	}
}

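// TestEndpointSetsEqual verifies that endpointSetsEqual compares
// endpoint lists as sets: order and duplicates are ignored, but any
// element present in only one list makes them unequal.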
func TestEndpointSetsEqual(t *testing.T) {
|
|
|
|
	s := func(ports ...uint16) (ret []tailcfg.Endpoint) {
		for _, port := range ports {
			ret = append(ret, tailcfg.Endpoint{
				Addr: netip.AddrPortFrom(netip.Addr{}, port),
			})
		}
		return
	}
	tests := []struct {
		a, b []tailcfg.Endpoint
		want bool
	}{
		{
			want: true,
		},
		{
			a: s(1, 2, 3),
			b: s(1, 2, 3),
			want: true,
		},
		{
			a: s(1, 2),
			b: s(2, 1),
			want: true,
		},
		{
			a: s(1, 2),
			b: s(2, 1, 1),
			want: true,
		},
		{
			a: s(1, 2, 2),
			b: s(2, 1),
			want: true,
		},
		{
			a: s(1, 2, 2),
			b: s(2, 1, 1),
			want: true,
		},
		{
			a: s(1, 2, 2, 3),
			b: s(2, 1, 1),
			want: false,
		},
		{
			a: s(1, 2, 2),
			b: s(2, 1, 1, 3),
			want: false,
		},
	}
	for _, tt := range tests {
		if got := endpointSetsEqual(tt.a, tt.b); got != tt.want {
			t.Errorf("%q vs %q = %v; want %v", tt.a, tt.b, got, tt.want)
		}
	}
}

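// TestBetterAddr exercises betterAddr's preference order, as encoded in the
// table below: any address beats the zero value, lower latency wins only when
// it is substantially lower, private and IPv6 addresses are preferred over
// public IPv4 when latency is roughly equal, link-local beats other private
// addresses (but not loopback), and a larger wire MTU breaks ties between
// otherwise identical candidates.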
func TestBetterAddr(t *testing.T) {
	const ms = time.Millisecond
	al := func(ipps string, d time.Duration) addrQuality {
		return addrQuality{AddrPort: netip.MustParseAddrPort(ipps), latency: d}
	}
	almtu := func(ipps string, d time.Duration, mtu tstun.WireMTU) addrQuality {
		return addrQuality{AddrPort: netip.MustParseAddrPort(ipps), latency: d, wireMTU: mtu}
	}
	zero := addrQuality{}

	const (
		publicV4 = "1.2.3.4:555"
		publicV4_2 = "5.6.7.8:999"
		publicV6 = "[2001::5]:123"

		privateV4 = "10.0.0.2:123"
	)

	tests := []struct {
		a, b addrQuality
		want bool // whether a is better than b
	}{
		{a: zero, b: zero, want: false},
		{a: al(publicV4, 5*ms), b: zero, want: true},
		{a: zero, b: al(publicV4, 5*ms), want: false},
		{a: al(publicV4, 5*ms), b: al(publicV4_2, 10*ms), want: true},
		{a: al(publicV4, 5*ms), b: al(publicV4, 10*ms), want: false}, // same IPPort

		// Don't prefer b to a if it's not substantially better.
		{a: al(publicV4, 100*ms), b: al(publicV4_2, 100*ms), want: false},
		{a: al(publicV4, 100*ms), b: al(publicV4_2, 101*ms), want: false},
		{a: al(publicV4, 100*ms), b: al(publicV4_2, 103*ms), want: true},

		// Latencies of zero don't result in a divide-by-zero
		{a: al(publicV4, 0), b: al(publicV4_2, 0), want: false},

		// Prefer private IPs to public IPs if roughly equivalent...
		{
			a: al(privateV4, 100*ms),
			b: al(publicV4, 91*ms),
			want: true,
		},
		{
			a: al(publicV4, 91*ms),
			b: al(privateV4, 100*ms),
			want: false,
		},
		// ... but not if the private IP is slower.
		{
			a: al(privateV4, 100*ms),
			b: al(publicV4, 30*ms),
			want: false,
		},
		{
			a: al(publicV4, 30*ms),
			b: al(privateV4, 100*ms),
			want: true,
		},

		// Prefer IPv6 if roughly equivalent:
		{
			a: al(publicV6, 100*ms),
			b: al(publicV4, 91*ms),
			want: true,
		},
		{
			a: al(publicV4, 91*ms),
			b: al(publicV6, 100*ms),
			want: false,
		},
		// But not if IPv4 is much faster:
		{
			a: al(publicV6, 100*ms),
			b: al(publicV4, 30*ms),
			want: false,
		},
		{
			a: al(publicV4, 30*ms),
			b: al(publicV6, 100*ms),
			want: true,
		},
		// If addresses are equal, prefer larger MTU
		{
			a: almtu(publicV4, 30*ms, 1500),
			b: almtu(publicV4, 30*ms, 0),
			want: true,
		},
		// Private IPs are preferred over public IPs even if the public
		// IP is IPv6.
		{
			a: al("192.168.0.1:555", 100*ms),
			b: al("[2001::5]:123", 101*ms),
			want: true,
		},
		{
			a: al("[2001::5]:123", 101*ms),
			b: al("192.168.0.1:555", 100*ms),
			want: false,
		},

		// Link-local unicast addresses are preferred over other
		// private IPs, but not as much as localhost addresses.
		{
			a: al("[fe80::ce8:474a:a27e:113b]:555", 101*ms),
			b: al("[fd89:1a8a:8888:9999:aaaa:bbbb:cccc:dddd]:555", 100*ms),
			want: true,
		},
		{
			a: al("[fe80::ce8:474a:a27e:113b]:555", 101*ms),
			b: al("[::1]:555", 100*ms),
			want: false,
		},
	}
	for i, tt := range tests {
		got := betterAddr(tt.a, tt.b)
		if got != tt.want {
			t.Errorf("[%d] betterAddr(%+v, %+v) = %v; want %v", i, tt.a, tt.b, got, tt.want)
			continue
		}
		gotBack := betterAddr(tt.b, tt.a)
		if got && gotBack {
			t.Errorf("[%d] betterAddr(%+v, %+v) and betterAddr(%+v, %+v) both unexpectedly true", i, tt.a, tt.b, tt.b, tt.a)
		}
	}
}

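// epFromTyped extracts just the netip.AddrPort from each tailcfg.Endpoint.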
func epFromTyped(eps []tailcfg.Endpoint) (ret []netip.AddrPort) {
	for _, ep := range eps {
		ret = append(ret, ep.Addr)
	}
	return
}

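// eps parses the given strings as netip.AddrPorts, panicking on invalid
// input. It's a convenience for building test endpoints.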
func eps(s ...string) []netip.AddrPort {
	var eps []netip.AddrPort
	for _, ep := range s {
		eps = append(eps, netip.MustParseAddrPort(ep))
	}
	return eps
}

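// TestStressSetNetworkMap calls SetNetworkMap with a randomized sequence of
// peer churn (peers appearing and disappearing, disco and node keys changing)
// and checks the peerMap invariants after every step. Set
// TS_STRESS_SET_NETWORK_MAP_SEED to replay a particular sequence.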
func TestStressSetNetworkMap(t *testing.T) {
	t.Parallel()

	conn := newTestConn(t)
	t.Cleanup(func() { conn.Close() })
	var buf tstest.MemLogger
	conn.logf = buf.Logf

	conn.SetPrivateKey(key.NewNode())

	const npeers = 5
	present := make([]bool, npeers)
	allPeers := make([]*tailcfg.Node, npeers)
	for i := range allPeers {
		present[i] = true
		allPeers[i] = &tailcfg.Node{
			ID: tailcfg.NodeID(i) + 1,
			DiscoKey: randDiscoKey(),
			Key: randNodeKey(),
			Endpoints: eps(fmt.Sprintf("192.168.1.2:%d", i)),
		}
	}

	// Get a PRNG seed. If not provided, generate a new one to get extra coverage.
	seed, err := strconv.ParseUint(os.Getenv("TS_STRESS_SET_NETWORK_MAP_SEED"), 10, 64)
	if err != nil {
		var buf [8]byte
		crand.Read(buf[:])
		seed = binary.LittleEndian.Uint64(buf[:])
	}
	t.Logf("TS_STRESS_SET_NETWORK_MAP_SEED=%d", seed)
	prng := rand.New(rand.NewSource(int64(seed)))

	const iters = 1000 // approx 0.5s on an m1 mac
	for range iters {
		for j := 0; j < npeers; j++ {
			// Randomize which peers are present.
			if prng.Int()&1 == 0 {
				present[j] = !present[j]
			}
			// Randomize some peer disco keys and node keys.
			if prng.Int()&1 == 0 {
				allPeers[j].DiscoKey = randDiscoKey()
			}
			if prng.Int()&1 == 0 {
				allPeers[j].Key = randNodeKey()
			}
		}
		// Clone existing peers into a new netmap.
		peers := make([]*tailcfg.Node, 0, len(allPeers))
		for peerIdx, p := range allPeers {
			if present[peerIdx] {
				peers = append(peers, p.Clone())
			}
		}
		// Set the netmap.
		conn.SetNetworkMap(&netmap.NetworkMap{
			Peers: nodeViews(peers),
		})
		// Check invariants.
		if err := conn.peerMap.validate(); err != nil {
			t.Error(err)
		}
	}
}

func randDiscoKey() (k key.DiscoPublic) { return key.NewDisco().Public() }
func randNodeKey() (k key.NodePublic)   { return key.NewNode().Public() }

// validate checks m for internal consistency and reports the first error encountered.
// It is used in tests only, so it doesn't need to be efficient.
func (m *peerMap) validate() error {
	seenEps := make(map[*endpoint]bool)
	for pub, pi := range m.byNodeKey {
		if got := pi.ep.publicKey; got != pub {
			return fmt.Errorf("byNodeKey[%v].publicKey = %v", pub, got)
		}
		if _, ok := seenEps[pi.ep]; ok {
			return fmt.Errorf("duplicate endpoint present: %v", pi.ep.publicKey)
		}
		seenEps[pi.ep] = true
		for ipp := range pi.ipPorts {
			if got := m.byIPPort[ipp]; got != pi {
				return fmt.Errorf("m.byIPPort[%v] = %v, want %v", ipp, got, pi)
			}
		}
	}
	if len(m.byNodeKey) != len(m.byNodeID) {
		return fmt.Errorf("len(m.byNodeKey)=%d != len(m.byNodeID)=%d", len(m.byNodeKey), len(m.byNodeID))
	}
	for nodeID, pi := range m.byNodeID {
		ep := pi.ep
		if pi2, ok := m.byNodeKey[ep.publicKey]; !ok {
			return fmt.Errorf("nodeID %d in map with publicKey %v that's missing from map", nodeID, ep.publicKey)
		} else if pi2 != pi {
			return fmt.Errorf("nodeID %d in map with publicKey %v that points to different endpoint", nodeID, ep.publicKey)
		}
	}

	for ipp, pi := range m.byIPPort {
		if !pi.ipPorts.Contains(ipp) {
			return fmt.Errorf("ipPorts[%v] for %v is false", ipp, pi.ep.publicKey)
		}
		pi2 := m.byNodeKey[pi.ep.publicKey]
		if pi != pi2 {
			return fmt.Errorf("byNodeKey[%v]=%p doesn't match byIPPort[%v]=%p", pi.ep.publicKey, pi2, ipp, pi)
		}
	}

	publicToDisco := make(map[key.NodePublic]key.DiscoPublic)
	for disco, nodes := range m.nodesOfDisco {
		for pub := range nodes {
			if _, ok := m.byNodeKey[pub]; !ok {
				return fmt.Errorf("nodesOfDisco refers to public key %v, which is not present in byNodeKey", pub)
			}
			if _, ok := publicToDisco[pub]; ok {
				return fmt.Errorf("publicKey %v refers to multiple disco keys", pub)
			}
			publicToDisco[pub] = disco
		}
	}

	return nil
}

func TestBlockForeverConnUnblocks(t *testing.T) {
	c := newBlockForeverConn()
	done := make(chan error, 1)
	go func() {
		defer close(done)
		_, _, err := c.ReadFromUDPAddrPort(make([]byte, 1))
		done <- err
	}()
	time.Sleep(50 * time.Millisecond) // give ReadFrom time to get blocked
	if err := c.Close(); err != nil {
		t.Fatal(err)
	}
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()
	select {
	case err := <-done:
		if err != net.ErrClosed {
			t.Errorf("got %v; want net.ErrClosed", err)
		}
	case <-timer.C:
		t.Fatal("timeout")
	}
}

func TestDiscoMagicMatches(t *testing.T) {
	// Convert our disco magic number into a uint32 and uint16 to test
	// against. We panic on an incorrect length here rather than try to be
	// generic with our BPF instructions below.
	//
	// Note that BPF uses network byte order (big-endian) when loading data
	// from a packet, so that is what we use to generate our magic numbers.
	if len(disco.Magic) != 6 {
		t.Fatalf("expected disco.Magic to be of length 6")
	}
	if m1 := binary.BigEndian.Uint32([]byte(disco.Magic[:4])); m1 != discoMagic1 {
		t.Errorf("first 4 bytes of disco magic don't match, got %v want %v", discoMagic1, m1)
	}
	if m2 := binary.BigEndian.Uint16([]byte(disco.Magic[4:6])); m2 != discoMagic2 {
		t.Errorf("last 2 bytes of disco magic don't match, got %v want %v", discoMagic2, m2)
	}
}

func TestRebindingUDPConn(t *testing.T) {
	// Test that RebindingUDPConn can be re-bound to different connection
	// types.
	c := RebindingUDPConn{}
	realConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer realConn.Close()
	c.setConnLocked(realConn.(nettype.PacketConn), "udp4", 1)
	c.setConnLocked(newBlockForeverConn(), "", 1)
}

// https://github.com/tailscale/tailscale/issues/6680: don't ignore
// SetNetworkMap calls when there are no peers. (A too-aggressive fast path was
// previously bailing out early, thinking there were no changes since all zero
// peers didn't change, but the netmap has non-peer info in it too that we
// shouldn't discard.)
func TestSetNetworkMapWithNoPeers(t *testing.T) {
	var c Conn
	knobs := &controlknobs.Knobs{}
	c.logf = logger.Discard
	c.controlKnobs = knobs // TODO(bradfitz): move silent disco bool to controlknobs

	for i := 1; i <= 3; i++ {
		v := !debugEnableSilentDisco()
		envknob.Setenv("TS_DEBUG_ENABLE_SILENT_DISCO", fmt.Sprint(v))
		nm := &netmap.NetworkMap{}
		c.SetNetworkMap(nm)
		t.Logf("ptr %d: %p", i, nm)
		if c.lastFlags.heartbeatDisabled != v {
			t.Fatalf("call %d: didn't store netmap", i)
		}
	}
}

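// TestBufferedDerpWritesBeforeDrop sanity-checks that the DERP write queue is
// at least 32 packets deep before writes start being dropped.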
func TestBufferedDerpWritesBeforeDrop(t *testing.T) {
	vv := bufferedDerpWritesBeforeDrop()
	if vv < 32 {
		t.Fatalf("got bufferedDerpWritesBeforeDrop=%d, which is < 32", vv)
	}
	t.Logf("bufferedDerpWritesBeforeDrop = %d", vv)
}

// newWireguard starts up a new wireguard-go device attached to a test tun, and
// returns the device, tun and endpoint port. To add peers call device.IpcSet with UAPI instructions.
func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Device, *tuntest.ChannelTUN, uint16) {
	wgtun := tuntest.NewChannelTUN()
	wglogf := func(f string, args ...any) {
		t.Logf("wg-go: "+f, args...)
	}
	wglog := device.Logger{
		Verbosef: func(string, ...any) {},
		Errorf: wglogf,
	}
	wgdev := wgcfg.NewDevice(wgtun.TUN(), wgconn.NewDefaultBind(), &wglog)

	if err := wgdev.IpcSet(uapi); err != nil {
		t.Fatal(err)
	}

	if err := wgdev.Up(); err != nil {
		t.Fatal(err)
	}

	var port uint16
	s, err := wgdev.IpcGet()
	if err != nil {
		t.Fatal(err)
	}
	for _, line := range strings.Split(s, "\n") {
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			continue
		}
		k, v, _ := strings.Cut(line, "=")
		if k == "listen_port" {
			p, err := strconv.ParseUint(v, 10, 16)
			if err != nil {
				panic(err)
			}
			port = uint16(p)
			break
		}
	}

	return wgdev, wgtun, port
}

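// TestIsWireGuardOnlyPeer checks that a peer marked IsWireGuardOnly (a plain
// wireguard-go device with no Tailscale control or disco stack) is reachable
// over its static endpoint: a ping injected on the magicsock side should come
// out of the wireguard-go device's TUN unchanged.
//
// The UAPI config handed to newWireguard below is built with fmt.Sprintf and
// looks roughly like this (hex keys elided):
//
//	private_key=<hex>
//	public_key=<hex>
//	allowed_ip=100.111.222.111/32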
func TestIsWireGuardOnlyPeer(t *testing.T) {
	derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1))
	defer cleanup()

	tskey := key.NewNode()
	tsaip := netip.MustParsePrefix("100.111.222.111/32")

	wgkey := key.NewNode()
	wgaip := netip.MustParsePrefix("100.222.111.222/32")

	uapi := fmt.Sprintf("private_key=%s\npublic_key=%s\nallowed_ip=%s\n\n",
		wgkey.UntypedHexString(), tskey.Public().UntypedHexString(), tsaip.String())
	wgdev, wgtun, port := newWireguard(t, uapi, []netip.Prefix{wgaip})
	defer wgdev.Close()
	wgEp := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), port)

	m := newMagicStackWithKey(t, t.Logf, localhostListener{}, derpMap, tskey)
	defer m.Close()

	nm := &netmap.NetworkMap{
		Name: "ts",
		PrivateKey: m.privateKey,
		NodeKey: m.privateKey.Public(),
		SelfNode: (&tailcfg.Node{
			Addresses: []netip.Prefix{tsaip},
		}).View(),
		Peers: nodeViews([]*tailcfg.Node{
			{
				ID: 1,
				Key: wgkey.Public(),
				Endpoints: []netip.AddrPort{wgEp},
				IsWireGuardOnly: true,
				Addresses: []netip.Prefix{wgaip},
				AllowedIPs: []netip.Prefix{wgaip},
			},
		}),
	}
	m.conn.SetNetworkMap(nm)

	cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "")
	if err != nil {
		t.Fatal(err)
	}
	m.Reconfig(cfg)

	pbuf := tuntest.Ping(wgaip.Addr(), tsaip.Addr())
	m.tun.Outbound <- pbuf

	select {
	case p := <-wgtun.Inbound:
		if !bytes.Equal(p, pbuf) {
			t.Errorf("got unexpected packet: %x", p)
		}
	case <-time.After(time.Second):
		t.Fatal("no packet after 1s")
	}
}

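// TestIsWireGuardOnlyPeerWithMasquerade is like TestIsWireGuardOnlyPeer, but
// the WireGuard peer only allows the masquerade address (10.64.0.2/32) rather
// than the node's Tailscale address, so the test verifies that packets arrive
// with their source rewritten to SelfNodeV4MasqAddrForThisPeer.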
func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) {
	derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1))
	defer cleanup()

	tskey := key.NewNode()
	tsaip := netip.MustParsePrefix("100.111.222.111/32")

	wgkey := key.NewNode()
	wgaip := netip.MustParsePrefix("10.64.0.1/32")

	// The IP that the WireGuard peer has in its allowed IPs and expects as a masquerade source.
	masqip := netip.MustParsePrefix("10.64.0.2/32")

	uapi := fmt.Sprintf("private_key=%s\npublic_key=%s\nallowed_ip=%s\n\n",
		wgkey.UntypedHexString(), tskey.Public().UntypedHexString(), masqip.String())
	wgdev, wgtun, port := newWireguard(t, uapi, []netip.Prefix{wgaip})
	defer wgdev.Close()
	wgEp := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), port)

	m := newMagicStackWithKey(t, t.Logf, localhostListener{}, derpMap, tskey)
	defer m.Close()

	nm := &netmap.NetworkMap{
		Name: "ts",
		PrivateKey: m.privateKey,
		NodeKey: m.privateKey.Public(),
		SelfNode: (&tailcfg.Node{
			Addresses: []netip.Prefix{tsaip},
		}).View(),
		Peers: nodeViews([]*tailcfg.Node{
			{
				ID: 1,
				Key: wgkey.Public(),
				Endpoints: []netip.AddrPort{wgEp},
				IsWireGuardOnly: true,
				Addresses: []netip.Prefix{wgaip},
				AllowedIPs: []netip.Prefix{wgaip},
				SelfNodeV4MasqAddrForThisPeer: ptr.To(masqip.Addr()),
			},
		}),
	}
	m.conn.SetNetworkMap(nm)

	cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "")
	if err != nil {
		t.Fatal(err)
	}
	m.Reconfig(cfg)

	pbuf := tuntest.Ping(wgaip.Addr(), tsaip.Addr())
	m.tun.Outbound <- pbuf

	select {
	case p := <-wgtun.Inbound:
		// TODO(raggi): move to a bytes.Equal based test later, once
		// tuntest.Ping produces correct checksums!

		var pkt packet.Parsed
		pkt.Decode(p)
		if pkt.ICMP4Header().Type != packet.ICMP4EchoRequest {
			t.Fatalf("unexpected packet: %x", p)
		}
		if pkt.Src.Addr() != masqip.Addr() {
			t.Fatalf("bad source IP, got %s, want %s", pkt.Src.Addr(), masqip.Addr())
		}
		if pkt.Dst.Addr() != wgaip.Addr() {
			t.Fatalf("bad destination IP, got %s, want %s", pkt.Dst.Addr(), wgaip.Addr())
		}
	case <-time.After(time.Second):
		t.Fatal("no packet after 1s")
	}
}

// applyNetworkMap is a test helper that sets the network map and
// configures WG.
func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) {
	t.Helper()
	m.conn.SetNetworkMap(nm)
	// Make sure we can't use v6 to avoid test failures.
	m.conn.noV6.Store(true)

	// Turn the network map into a wireguard config (for the tailscale internal wireguard device).
	cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "")
	if err != nil {
		t.Fatal(err)
	}
	// Apply the wireguard config to the tailscale internal wireguard device.
	if err := m.Reconfig(cfg); err != nil {
		t.Fatal(err)
	}
}

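// TestIsWireGuardOnlyPickEndpointByPing verifies that, for a WireGuard-only
// peer advertising several candidate endpoints, magicsock pings each candidate
// and settles on the lowest-latency usable address as bestAddr (the IPv6
// candidate never answers, so it must never be chosen).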
func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) {
	t.Skip("This test is flaky; see https://github.com/tailscale/tailscale/issues/8037")

	clock := &tstest.Clock{}
	derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1))
	defer cleanup()

	// Create a TS client.
	tskey := key.NewNode()
	tsaip := netip.MustParsePrefix("100.111.222.111/32")

	// Create a WireGuard only client.
	wgkey := key.NewNode()
	wgaip := netip.MustParsePrefix("100.222.111.222/32")

	uapi := fmt.Sprintf("private_key=%s\npublic_key=%s\nallowed_ip=%s\n\n",
		wgkey.UntypedHexString(), tskey.Public().UntypedHexString(), tsaip.String())

	wgdev, wgtun, port := newWireguard(t, uapi, []netip.Prefix{wgaip})
	defer wgdev.Close()
	wgEp := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), port)
	wgEp2 := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.2"), port)

	m := newMagicStackWithKey(t, t.Logf, localhostListener{}, derpMap, tskey)
	defer m.Close()

	pr := newPingResponder(t)
	// Get a destination address which includes a port, so that UDP packets flow
	// to the correct place; the mockPinger will use this to direct port-less
	// pings to this place.
	pingDest := pr.LocalAddr()

	// Create and start the pinger that is used for the
	// WireGuard-only endpoint pings.
	p, closeP := mockPinger(t, clock, pingDest)
	defer closeP()
	m.conn.wgPinger.Set(p)

	// Create an IPv6 endpoint which should not receive any traffic.
	v6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.ParseIP("::"), Port: 0})
	if err != nil {
		t.Fatal(err)
	}
	badEpRecv := make(chan []byte)
	go func() {
		defer v6.Close()
		for {
			b := make([]byte, 1500)
			n, _, err := v6.ReadFrom(b)
			if err != nil {
				close(badEpRecv)
				return
			}
			badEpRecv <- b[:n]
		}
	}()
	wgEpV6 := netip.MustParseAddrPort(v6.LocalAddr().String())

	nm := &netmap.NetworkMap{
		Name: "ts",
		PrivateKey: m.privateKey,
		NodeKey: m.privateKey.Public(),
		SelfNode: (&tailcfg.Node{
			Addresses: []netip.Prefix{tsaip},
		}).View(),
		Peers: nodeViews([]*tailcfg.Node{
			{
				Key: wgkey.Public(),
				Endpoints: []netip.AddrPort{wgEp, wgEp2, wgEpV6},
				IsWireGuardOnly: true,
				Addresses: []netip.Prefix{wgaip},
				AllowedIPs: []netip.Prefix{wgaip},
			},
		}),
	}

	applyNetworkMap(t, m, nm)

	buf := tuntest.Ping(wgaip.Addr(), tsaip.Addr())
	m.tun.Outbound <- buf

	select {
	case p := <-wgtun.Inbound:
		if !bytes.Equal(p, buf) {
			t.Errorf("got unexpected packet: %x", p)
		}
	case <-badEpRecv:
		t.Fatal("got packet on bad endpoint")
	case <-time.After(5 * time.Second):
		t.Fatal("no packet after 5s")
	}

	pi, ok := m.conn.peerMap.byNodeKey[wgkey.Public()]
	if !ok {
		t.Fatal("wgkey doesn't exist in peer map")
	}

	// Check that we got a valid address set on the first send - this
	// will be randomly selected, but because we have noV6 set to true,
	// it will be the IPv4 address.
	if !pi.ep.bestAddr.Addr().IsValid() {
		t.Fatal("bestAddr was not set")
	}

	if pi.ep.trustBestAddrUntil.Before(mono.Now().Add(14 * time.Second)) {
		t.Errorf("trustBestAddrUntil time wasn't set to 15 seconds in the future: got %v", pi.ep.trustBestAddrUntil)
	}

	for ipp, state := range pi.ep.endpointState {
		if ipp == wgEp {
			if len(state.recentPongs) != 1 {
				t.Errorf("IPv4 address did not have a recentPong entry: got %v, want %v", len(state.recentPongs), 1)
			}
			// Set the latency extremely low so we choose this endpoint during the next
			// addrForSendLocked call.
			state.recentPongs[state.recentPong].latency = time.Nanosecond
		}

		if ipp == wgEp2 {
			if len(state.recentPongs) != 1 {
				t.Errorf("IPv4 address did not have a recentPong entry: got %v, want %v", len(state.recentPongs), 1)
			}
			// Set the latency extremely high so we don't choose this endpoint during the next
			// addrForSendLocked call.
			state.recentPongs[state.recentPong].latency = time.Second
		}

		if ipp == wgEpV6 && len(state.recentPongs) != 0 {
			t.Fatal("IPv6 should not have a recentPong: IPv6 is not usable")
		}
	}

	// Set trustBestAddrUntil to the past, so addrForSendLocked goes through the
	// latency selection flow.
	pi.ep.trustBestAddrUntil = mono.Now().Add(-time.Second)

	buf = tuntest.Ping(wgaip.Addr(), tsaip.Addr())
	m.tun.Outbound <- buf

	select {
	case p := <-wgtun.Inbound:
		if !bytes.Equal(p, buf) {
			t.Errorf("got unexpected packet: %x", p)
		}
	case <-badEpRecv:
		t.Fatal("got packet on bad endpoint")
	case <-time.After(5 * time.Second):
		t.Fatal("no packet after 5s")
	}

	// Check that we have responded to a WireGuard only ping twice.
	if pr.responseCount != 2 {
		t.Fatal("pingresponder response count was not 2", pr.responseCount)
	}

	pi, ok = m.conn.peerMap.byNodeKey[wgkey.Public()]
	if !ok {
		t.Fatal("wgkey doesn't exist in peer map")
	}

	if !pi.ep.bestAddr.Addr().IsValid() {
		t.Error("no bestAddr address was set")
	}

	if pi.ep.bestAddr.Addr() != wgEp.Addr() {
		t.Errorf("bestAddr was not set to the expected IPv4 address: got %v, want %v", pi.ep.bestAddr.Addr().String(), wgEp.Addr())
	}

	if pi.ep.trustBestAddrUntil.IsZero() {
		t.Fatal("trustBestAddrUntil was not set")
	}

	if pi.ep.trustBestAddrUntil.Before(mono.Now().Add(55 * time.Minute)) {
		// Use 55 minutes in case of slow tests.
		t.Errorf("trustBestAddrUntil time wasn't set to an hour in the future: got %v", pi.ep.trustBestAddrUntil)
	}
}

// udpingPacketConn will convert potentially ICMP destination addrs to UDP
// destination addrs in WriteTo so that a test that is intending to send ICMP
// traffic will instead send UDP traffic, without the higher level Pinger being
// aware of this difference.
type udpingPacketConn struct {
	net.PacketConn
	// destPort will be configured by the test to be the peer expected to respond to a ping.
	destPort uint16
}

func (u *udpingPacketConn) WriteTo(body []byte, dest net.Addr) (int, error) {
	switch d := dest.(type) {
	case *net.IPAddr:
		udpAddr := &net.UDPAddr{
			IP: d.IP,
			Port: int(u.destPort),
			Zone: d.Zone,
		}
		return u.PacketConn.WriteTo(body, udpAddr)
	}
	return 0, fmt.Errorf("unimplemented udpingPacketConn for %T", dest)
}

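// mockListenPacketer hands the ping.Pinger pre-made UDP sockets in place of
// the raw ICMP sockets it would normally open, so the tests can run without
// elevated privileges.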
type mockListenPacketer struct {
	conn4 net.PacketConn
	conn6 net.PacketConn
}

func (mlp *mockListenPacketer) ListenPacket(ctx context.Context, typ string, addr string) (net.PacketConn, error) {
	switch typ {
	case "ip4:icmp":
		return mlp.conn4, nil
	case "ip6:icmp":
		return mlp.conn6, nil
	}
	return nil, fmt.Errorf("unimplemented ListenPacketForTesting for %s", typ)
}

func mockPinger(t *testing.T, clock *tstest.Clock, dest net.Addr) (*ping.Pinger, func()) {
	ctx := context.Background()

	dIPP := netip.MustParseAddrPort(dest.String())
	// In tests, we use UDP so that we can test without being root; this
	// doesn't matter because we mock out the ICMP reply below to be a real
	// ICMP echo reply packet.
	conn4, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("net.ListenPacket: %v", err)
	}
	conn6, err := net.ListenPacket("udp6", "[::]:0")
	if err != nil {
		t.Fatalf("net.ListenPacket: %v", err)
	}

	conn4 = &udpingPacketConn{
		PacketConn: conn4,
		destPort: dIPP.Port(),
	}

	conn6 = &udpingPacketConn{
		PacketConn: conn6,
		destPort: dIPP.Port(),
	}

	p := ping.New(ctx, t.Logf, &mockListenPacketer{conn4: conn4, conn6: conn6})

	done := func() {
		if err := p.Close(); err != nil {
			t.Errorf("error on close: %v", err)
		}
	}

	return p, done
}

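// pingResponder is a UDP listener that parses every inbound packet as an ICMP
// echo request and writes back a matching echo reply, counting how many
// replies it has sent.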
type pingResponder struct {
	net.PacketConn
	running atomic.Bool
	responseCount int
}

func (p *pingResponder) start() {
	buf := make([]byte, 1500)
	for p.running.Load() {
		n, addr, err := p.PacketConn.ReadFrom(buf)
		if err != nil {
			return
		}

		m, err := icmp.ParseMessage(1, buf[:n])
		if err != nil {
			panic(fmt.Sprintf("got a non-ICMP message: %x", buf[:n]))
		}

		r := icmp.Message{
			Type: ipv4.ICMPTypeEchoReply,
			Code: m.Code,
			Body: m.Body,
		}

		b, err := r.Marshal(nil)
		if err != nil {
			panic(err)
		}

		if _, err := p.PacketConn.WriteTo(b, addr); err != nil {
			panic(err)
		}
		p.responseCount++
	}
}

func (p *pingResponder) stop() {
	p.running.Store(false)
	p.Close()
}

func newPingResponder(t *testing.T) *pingResponder {
	t.Helper()
	// global binds should be both IPv4 and IPv6 (if our test platforms don't,
	// we might need to bind two sockets instead)
	conn, err := net.ListenPacket("udp", ":")
	if err != nil {
		t.Fatal(err)
	}
	pr := &pingResponder{PacketConn: conn}
	pr.running.Store(true)
	go pr.start()
	t.Cleanup(pr.stop)
	return pr
}

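// TestAddrForSendLockedForWireGuardOnly drives addrForSendLocked directly on a
// hand-built WireGuard-only endpoint: with no pong history it should ask for a
// ping, a recent ping within wireguardPingInterval should not trigger another,
// and once latencies are known it should pick the lowest-latency endpoint,
// preferring IPv6 when IPv4 and IPv6 latencies are equal.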
func TestAddrForSendLockedForWireGuardOnly(t *testing.T) {
	testTime := mono.Now()
	secondPingTime := testTime.Add(10 * time.Second)

	type endpointDetails struct {
		addrPort netip.AddrPort
		latency time.Duration
	}

	wgTests := []struct {
		name string
		sendInitialPing bool
		validAddr bool
		sendFollowUpPing bool
		pingTime mono.Time
		ep []endpointDetails
		want netip.AddrPort
	}{
		{
			name: "no endpoints",
			sendInitialPing: false,
			validAddr: false,
			sendFollowUpPing: false,
			pingTime: testTime,
			ep: []endpointDetails{},
			want: netip.AddrPort{},
		},
		{
			name: "singular endpoint does not request ping",
			sendInitialPing: false,
			validAddr: true,
			sendFollowUpPing: false,
			pingTime: testTime,
			ep: []endpointDetails{
				{
					addrPort: netip.MustParseAddrPort("1.1.1.1:111"),
					latency: 100 * time.Millisecond,
				},
			},
			want: netip.MustParseAddrPort("1.1.1.1:111"),
		},
		{
			name: "ping sent within wireguardPingInterval should not request ping",
			sendInitialPing: true,
			validAddr: true,
			sendFollowUpPing: false,
			pingTime: testTime.Add(7 * time.Second),
			ep: []endpointDetails{
				{
					addrPort: netip.MustParseAddrPort("1.1.1.1:111"),
					latency: 100 * time.Millisecond,
				},
				{
					addrPort: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222"),
					latency: 2000 * time.Millisecond,
				},
			},
			want: netip.MustParseAddrPort("1.1.1.1:111"),
		},
		{
			name: "ping sent outside of wireguardPingInterval should request ping",
			sendInitialPing: true,
			validAddr: true,
			sendFollowUpPing: true,
			pingTime: testTime.Add(3 * time.Second),
			ep: []endpointDetails{
				{
					addrPort: netip.MustParseAddrPort("1.1.1.1:111"),
					latency: 100 * time.Millisecond,
				},
				{
					addrPort: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222"),
					latency: 150 * time.Millisecond,
				},
			},
			want: netip.MustParseAddrPort("1.1.1.1:111"),
		},
		{
			name: "choose lowest latency for usable IPv4 and IPv6",
			sendInitialPing: true,
			validAddr: true,
			sendFollowUpPing: false,
			pingTime: secondPingTime,
			ep: []endpointDetails{
				{
					addrPort: netip.MustParseAddrPort("1.1.1.1:111"),
					latency: 100 * time.Millisecond,
				},
				{
					addrPort: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222"),
					latency: 10 * time.Millisecond,
				},
			},
			want: netip.MustParseAddrPort("[2345:0425:2CA1:0000:0000:0567:5673:23b5]:222"),
		},
		{
			name: "choose IPv6 address when latency is the same for v4 and v6",
			sendInitialPing: true,
			validAddr: true,
			sendFollowUpPing: false,
			pingTime: secondPingTime,
			ep: []endpointDetails{
				{
					addrPort: netip.MustParseAddrPort("1.1.1.1:111"),
					latency: 100 * time.Millisecond,
				},
				{
					addrPort: netip.MustParseAddrPort("[1::1]:567"),
					latency: 100 * time.Millisecond,
				},
			},
			want: netip.MustParseAddrPort("[1::1]:567"),
		},
	}

	for _, test := range wgTests {
		t.Run(test.name, func(t *testing.T) {
			endpoint := &endpoint{
				isWireguardOnly: true,
				endpointState: map[netip.AddrPort]*endpointState{},
				c: &Conn{
					logf: t.Logf,
					noV4: atomic.Bool{},
					noV6: atomic.Bool{},
				},
			}

			for _, epd := range test.ep {
				endpoint.endpointState[epd.addrPort] = &endpointState{}
			}
			udpAddr, _, shouldPing := endpoint.addrForSendLocked(testTime)
			if udpAddr.IsValid() != test.validAddr {
				t.Errorf("udpAddr validity is incorrect; got %v, want %v", udpAddr.IsValid(), test.validAddr)
			}
			if shouldPing != test.sendInitialPing {
				t.Errorf("addrForSendLocked did not indicate correct ping state; got %v, want %v", shouldPing, test.sendInitialPing)
			}

			// Update the endpointState to simulate a ping having been
			// sent and a pong received.
			for _, epd := range test.ep {
				state, ok := endpoint.endpointState[epd.addrPort]
				if !ok {
					t.Errorf("addr does not exist in endpoint state map")
				}
				state.lastPing = test.pingTime

				latency, ok := state.latencyLocked()
				if ok {
					t.Errorf("latency was set for %v: %v", epd.addrPort, latency)
				}
				state.recentPongs = append(state.recentPongs, pongReply{
					latency: epd.latency,
				})
				state.recentPong = 0
			}

			udpAddr, _, shouldPing = endpoint.addrForSendLocked(secondPingTime)
			if udpAddr != test.want {
				t.Errorf("udpAddr returned is not expected: got %v, want %v", udpAddr, test.want)
			}
			if shouldPing != test.sendFollowUpPing {
				t.Errorf("addrForSendLocked did not indicate correct ping state; got %v, want %v", shouldPing, test.sendFollowUpPing)
			}
			if endpoint.bestAddr.AddrPort != test.want {
				t.Errorf("bestAddr.AddrPort is not as expected: got %v, want %v", endpoint.bestAddr.AddrPort, test.want)
			}
		})
	}
}

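// TestAddrForPingSizeLocked checks how the requested ping size interacts with
// the wire MTU of bestAddr: a ping that fits (or has size 0) goes out over UDP
// when bestAddr is valid, DERP is added whenever the UDP path is missing or
// not yet trusted, and an oversized ping abandons the UDP path so discovery
// restarts via DERP.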
func TestAddrForPingSizeLocked(t *testing.T) {
	testTime := mono.Now()

	validUdpAddr := netip.MustParseAddrPort("1.1.1.1:111")
	validDerpAddr := netip.MustParseAddrPort("2.2.2.2:222")

	pingTests := []struct {
		desc string
		size int // size of ping payload
		mtu tstun.WireMTU // The MTU of the path to bestAddr, if any
		bestAddr bool // If the endpoint should have a valid bestAddr
		bestAddrTrusted bool // If the bestAddr has not yet expired
		wantUDP bool // Non-zero UDP addr means send to UDP; zero means start discovery
		wantDERP bool // Non-zero DERP addr means send to DERP
	}{
		{
			desc: "ping_size_0_and_invalid_UDP_addr_should_start_discovery_and_send_to_DERP",
			size: 0,
			bestAddr: false,
			bestAddrTrusted: false,
			wantUDP: false,
			wantDERP: true,
		},
		{
			desc: "ping_size_0_and_valid_trusted_UDP_addr_should_send_to_UDP_and_not_send_to_DERP",
			size: 0,
			bestAddr: true,
			bestAddrTrusted: true,
			wantUDP: true,
			wantDERP: false,
		},
		{
			desc: "ping_size_0_and_valid_but_expired_UDP_addr_should_send_to_both_UDP_and_DERP",
			size: 0,
			bestAddr: true,
			bestAddrTrusted: false,
			wantUDP: true,
			wantDERP: true,
		},
		{
			desc: "ping_size_too_big_for_trusted_UDP_addr_should_start_discovery_and_send_to_DERP",
			size: pktLenToPingSize(1501, validUdpAddr.Addr().Is6()),
			mtu: 1500,
			bestAddr: true,
			bestAddrTrusted: true,
			wantUDP: false,
			wantDERP: true,
		},
		{
			desc: "ping_size_too_big_for_untrusted_UDP_addr_should_start_discovery_and_send_to_DERP",
			size: pktLenToPingSize(1501, validUdpAddr.Addr().Is6()),
			mtu: 1500,
			bestAddr: true,
			bestAddrTrusted: false,
			wantUDP: false,
			wantDERP: true,
		},
		{
			desc: "ping_size_small_enough_for_trusted_UDP_addr_should_send_to_UDP_and_not_DERP",
			size: pktLenToPingSize(1500, validUdpAddr.Addr().Is6()),
			mtu: 1500,
			bestAddr: true,
			bestAddrTrusted: true,
			wantUDP: true,
			wantDERP: false,
		},
		{
			desc: "ping_size_small_enough_for_untrusted_UDP_addr_should_send_to_UDP_and_DERP",
			size: pktLenToPingSize(1500, validUdpAddr.Addr().Is6()),
			mtu: 1500,
			bestAddr: true,
			bestAddrTrusted: false,
			wantUDP: true,
			wantDERP: true,
		},
	}

	for _, test := range pingTests {
		t.Run(test.desc, func(t *testing.T) {
			bestAddr := addrQuality{wireMTU: test.mtu}
			if test.bestAddr {
				bestAddr.AddrPort = validUdpAddr
			}
			ep := &endpoint{
				derpAddr: validDerpAddr,
				bestAddr: bestAddr,
			}
			if test.bestAddrTrusted {
				ep.trustBestAddrUntil = testTime.Add(1 * time.Second)
			}

			udpAddr, derpAddr := ep.addrForPingSizeLocked(testTime, test.size)

			if test.wantUDP && !udpAddr.IsValid() {
				t.Errorf("%s: udpAddr returned is not valid, won't be sent to UDP address", test.desc)
			}
			if !test.wantUDP && udpAddr.IsValid() {
				t.Errorf("%s: udpAddr returned is valid, discovery will not start", test.desc)
			}
			if test.wantDERP && !derpAddr.IsValid() {
				t.Errorf("%s: derpAddr returned is not valid, won't be sent to DERP", test.desc)
			}
			if !test.wantDERP && derpAddr.IsValid() {
				t.Errorf("%s: derpAddr returned is valid, will be sent to DERP", test.desc)
			}
		})
	}
}

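// TestMaybeSetNearestDERP covers the home-DERP selection rules: a
// netcheck-preferred region is only adopted while we are connected to control
// or when we have no home region yet, and with neither a current region nor a
// report we fall back to the deterministic fallback region chosen for tests.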
func TestMaybeSetNearestDERP(t *testing.T) {
	derpMap := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				RegionID: 1,
				RegionCode: "test",
				Nodes: []*tailcfg.DERPNode{
					{
						Name: "t1",
						RegionID: 1,
						HostName: "test-node.unused",
						IPv4: "127.0.0.1",
						IPv6: "none",
					},
				},
			},
			21: {
				RegionID: 21,
				RegionCode: "tor",
				Nodes: []*tailcfg.DERPNode{
					{
						Name: "21b",
						RegionID: 21,
						HostName: "tor.test-node.unused",
						IPv4: "127.0.0.1",
						IPv6: "none",
					},
				},
			},
			31: {
				RegionID: 31,
				RegionCode: "fallback",
				Nodes: []*tailcfg.DERPNode{
					{
						Name: "31b",
						RegionID: 31,
						HostName: "fallback.test-node.unused",
						IPv4: "127.0.0.1",
						IPv6: "none",
					},
				},
			},
		},
	}

	// Ensure that our fallback code always picks a deterministic value.
	tstest.Replace(t, &pickDERPFallbackForTests, func() int { return 31 })

	// Actually test this code path.
	tstest.Replace(t, &checkControlHealthDuringNearestDERPInTests, true)

	testCases := []struct {
		name string
		old int
		reportDERP int
		connectedToControl bool
		want int
	}{
		{
			name: "connected_with_report_derp",
			old: 1,
			reportDERP: 21,
			connectedToControl: true,
			want: 21,
		},
		{
			name: "not_connected_with_report_derp",
			old: 1,
			reportDERP: 21,
			connectedToControl: false,
			want: 1, // no change
		},
		{
			name: "not_connected_with_report_derp_and_no_current",
			old: 0, // no current DERP
			reportDERP: 21, // have new DERP
			connectedToControl: false, // not connected...
			want: 21, // ... but want to change to new DERP
		},
		{
			name: "not_connected_with_fallback_and_no_current",
			old: 0, // no current DERP
			reportDERP: 0, // no new DERP
			connectedToControl: false, // not connected...
			want: 31, // ... but we fallback to deterministic value
		},
		{
			name: "connected_no_derp",
			old: 1,
			reportDERP: 0,
			connectedToControl: true,
			want: 1, // no change
		},
		{
			name: "connected_no_derp_fallback",
			old: 0,
			reportDERP: 0,
			connectedToControl: true,
			want: 31, // deterministic fallback
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			ht := new(health.Tracker)
			c := newConn(t.Logf)
			c.myDerp = tt.old
			c.derpMap = derpMap
			c.health = ht

			report := &netcheck.Report{PreferredDERP: tt.reportDERP}

			oldConnected := ht.GetInPollNetMap()
			if tt.connectedToControl != oldConnected {
				if tt.connectedToControl {
					ht.GotStreamedMapResponse()
					t.Cleanup(ht.SetOutOfPollNetMap)
				} else {
					ht.SetOutOfPollNetMap()
					t.Cleanup(ht.GotStreamedMapResponse)
				}
			}

			got := c.maybeSetNearestDERP(report)
			if got != tt.want {
				t.Errorf("got new DERP region %d, want %d", got, tt.want)
			}
		})
	}
}

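// TestMaybeRebindOnError checks the EPERM rebind heuristic: a send failing
// with syscall.EPERM triggers a rebind on darwin but not on linux, and a
// repeat EPERM shortly after the previous rebind is suppressed.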
func TestMaybeRebindOnError(t *testing.T) {
	tstest.PanicOnLog()
	tstest.ResourceCheck(t)

	err := fmt.Errorf("outer err: %w", syscall.EPERM)

	t.Run("darwin-rebind", func(t *testing.T) {
		conn := newTestConn(t)
		defer conn.Close()
		rebound := conn.maybeRebindOnError("darwin", err)
		if !rebound {
			t.Errorf("darwin should rebind on syscall.EPERM")
		}
	})

	t.Run("linux-not-rebind", func(t *testing.T) {
		conn := newTestConn(t)
		defer conn.Close()
		rebound := conn.maybeRebindOnError("linux", err)
		if rebound {
			t.Errorf("linux should not rebind on syscall.EPERM")
		}
	})

	t.Run("no-frequent-rebind", func(t *testing.T) {
		conn := newTestConn(t)
		defer conn.Close()
		conn.lastEPERMRebind.Store(time.Now().Add(-1 * time.Second))
		rebound := conn.maybeRebindOnError("darwin", err)
		if rebound {
			t.Errorf("darwin should not rebind on syscall.EPERM within 5 seconds of last")
		}
	})
}

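// TestNetworkDownSendErrors checks that Send fails while the network is marked
// down and that the drop shows up in the
// tailscaled_outbound_dropped_packets_total usermetric with reason="error".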
func TestNetworkDownSendErrors(t *testing.T) {
	netMon := must.Get(netmon.New(t.Logf))
	defer netMon.Close()

	reg := new(usermetric.Registry)
	conn := must.Get(NewConn(Options{
		DisablePortMapper: true,
		Logf: t.Logf,
		NetMon: netMon,
		Metrics: reg,
	}))
	defer conn.Close()

	conn.SetNetworkUp(false)
	if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}); err == nil {
		t.Error("expected error, got nil")
	}
	resp := httptest.NewRecorder()
	reg.Handler(resp, new(http.Request))
	if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) {
		t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String())
	}
}