2023-01-27 13:37:20 -08:00
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
2021-01-05 11:02:52 -08:00
2021-02-04 13:12:42 -08:00
package ipnlocal
2021-01-05 11:02:52 -08:00
import (
2023-01-17 20:59:03 +00:00
"context"
2025-06-18 10:31:00 -07:00
"encoding/binary"
2024-03-08 10:43:32 -06:00
"encoding/json"
2023-12-05 17:16:34 -05:00
"errors"
2021-04-21 12:57:48 -07:00
"fmt"
2025-06-06 15:53:30 +01:00
"maps"
2024-04-15 18:14:20 -04:00
"math"
2021-10-14 16:40:06 -04:00
"net"
2021-04-21 12:57:48 -07:00
"net/http"
2022-07-25 20:55:44 -07:00
"net/netip"
2024-03-08 10:43:32 -06:00
"os"
2024-10-31 08:30:11 -07:00
"path/filepath"
2021-03-04 12:04:31 -08:00
"reflect"
2023-10-27 14:20:10 -07:00
"slices"
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
"strings"
2024-03-08 10:43:32 -06:00
"sync"
2021-02-04 13:12:42 -08:00
"testing"
2021-04-28 08:52:04 -07:00
"time"
2021-02-04 13:12:42 -08:00
2024-03-08 10:43:32 -06:00
"github.com/google/go-cmp/cmp"
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
"github.com/google/go-cmp/cmp/cmpopts"
2025-06-18 10:31:00 -07:00
memro "go4.org/mem"
2022-07-24 20:08:42 -07:00
"go4.org/netipx"
2023-10-27 14:20:10 -07:00
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/appc"
2024-01-22 16:57:31 -08:00
"tailscale.com/appc/appctest"
2024-04-19 13:37:21 -07:00
"tailscale.com/clientupdate"
2022-12-19 10:23:47 -08:00
"tailscale.com/control/controlclient"
2024-04-02 13:32:30 -07:00
"tailscale.com/drive"
"tailscale.com/drive/driveimpl"
2024-05-03 10:59:22 -04:00
"tailscale.com/health"
2024-06-06 16:31:52 -07:00
"tailscale.com/hostinfo"
2021-04-21 12:57:48 -07:00
"tailscale.com/ipn"
2024-10-31 08:30:11 -07:00
"tailscale.com/ipn/conffile"
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
"tailscale.com/ipn/ipnauth"
2022-02-28 13:08:45 -08:00
"tailscale.com/ipn/store/mem"
2024-04-15 18:14:20 -04:00
"tailscale.com/net/netcheck"
2024-04-27 21:18:18 -07:00
"tailscale.com/net/netmon"
2021-02-22 20:43:35 -08:00
"tailscale.com/net/tsaddr"
2024-06-28 23:17:31 -04:00
"tailscale.com/net/tsdial"
2021-01-05 11:02:52 -08:00
"tailscale.com/tailcfg"
2023-05-03 13:57:17 -07:00
"tailscale.com/tsd"
2022-12-19 10:23:47 -08:00
"tailscale.com/tstest"
2023-09-20 13:07:48 -07:00
"tailscale.com/types/dnstype"
2025-03-07 15:07:00 +00:00
"tailscale.com/types/ipproto"
2023-02-19 09:17:06 -07:00
"tailscale.com/types/key"
2021-04-28 08:52:04 -07:00
"tailscale.com/types/logger"
2023-03-23 10:49:56 -07:00
"tailscale.com/types/logid"
2021-02-05 15:44:46 -08:00
"tailscale.com/types/netmap"
2023-12-18 16:57:03 -06:00
"tailscale.com/types/opt"
2023-09-01 19:28:00 -07:00
"tailscale.com/types/ptr"
2024-03-08 10:43:32 -06:00
"tailscale.com/types/views"
2023-09-20 13:07:48 -07:00
"tailscale.com/util/dnsname"
2025-07-03 14:32:28 -05:00
"tailscale.com/util/eventbus"
2023-10-27 16:35:18 -05:00
"tailscale.com/util/mak"
2023-10-27 14:20:10 -07:00
"tailscale.com/util/must"
2024-06-03 16:49:55 -04:00
"tailscale.com/util/set"
2023-11-29 16:48:25 -05:00
"tailscale.com/util/syspolicy"
2024-10-08 10:50:14 -05:00
"tailscale.com/util/syspolicy/setting"
"tailscale.com/util/syspolicy/source"
2021-04-21 12:57:48 -07:00
"tailscale.com/wgengine"
2022-11-02 13:13:26 -07:00
"tailscale.com/wgengine/filter"
2025-03-07 15:07:00 +00:00
"tailscale.com/wgengine/filter/filtertype"
2021-03-04 12:04:31 -08:00
"tailscale.com/wgengine/wgcfg"
2021-01-05 11:02:52 -08:00
)
2024-04-11 10:12:13 -07:00
// fakeStoreRoutes is a no-op route-persistence hook for tests that
// need to supply an appc route-store function but don't care about
// what gets stored.
func fakeStoreRoutes(_ *appc.RouteInfo) error {
	return nil
}
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
func inRemove ( ip netip . Addr ) bool {
2021-03-17 17:04:32 -07:00
for _ , pfx := range removeFromDefaultRoute {
if pfx . Contains ( ip ) {
return true
}
}
return false
}
2025-06-18 10:31:00 -07:00
// makeNodeKeyFromID returns a deterministic dummy node public key for
// tests: 32 bytes, all zero except the final 8, which carry nodeID in
// big-endian order.
func makeNodeKeyFromID(nodeID tailcfg.NodeID) key.NodePublic {
	var raw [32]byte
	binary.BigEndian.PutUint64(raw[24:], uint64(nodeID))
	return key.NodePublicFromRaw32(memro.B(raw[:]))
}
2025-06-24 13:39:29 -05:00
// makeDiscoKeyFromID returns a deterministic dummy disco public key for
// tests: 32 bytes, all zero except the final 8, which carry nodeID in
// big-endian order.
//
// The original declared a named result `ret` that was never used; it is
// dropped here (same return type, so callers are unaffected), matching
// the signature style of makeNodeKeyFromID above.
func makeDiscoKeyFromID(nodeID tailcfg.NodeID) key.DiscoPublic {
	raw := make([]byte, 32)
	binary.BigEndian.PutUint64(raw[24:], uint64(nodeID))
	return key.DiscoPublicFromRaw32(memro.B(raw))
}
2021-02-22 20:43:35 -08:00
func TestShrinkDefaultRoute ( t * testing . T ) {
tests := [ ] struct {
2021-03-17 17:04:32 -07:00
route string
in [ ] string
out [ ] string
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
localIPFn func ( netip . Addr ) bool // true if this machine's local IP address should be "in" after shrinking.
2021-02-22 20:43:35 -08:00
} {
{
route : "0.0.0.0/0" ,
in : [ ] string { "1.2.3.4" , "25.0.0.1" } ,
out : [ ] string {
"10.0.0.1" ,
"10.255.255.255" ,
"192.168.0.1" ,
"192.168.255.255" ,
"172.16.0.1" ,
"172.31.255.255" ,
"100.101.102.103" ,
2021-03-17 17:04:32 -07:00
"224.0.0.1" ,
"169.254.169.254" ,
2021-02-22 20:43:35 -08:00
// Some random IPv6 stuff that shouldn't be in a v4
// default route.
"fe80::" ,
"2601::1" ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
localIPFn : func ( ip netip . Addr ) bool { return ! inRemove ( ip ) && ip . Is4 ( ) } ,
2021-02-22 20:43:35 -08:00
} ,
{
route : "::/0" ,
in : [ ] string { "::1" , "2601::1" } ,
out : [ ] string {
"fe80::1" ,
2021-03-17 17:04:32 -07:00
"ff00::1" ,
2022-07-24 20:08:42 -07:00
tsaddr . TailscaleULARange ( ) . Addr ( ) . String ( ) ,
2021-02-22 20:43:35 -08:00
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
localIPFn : func ( ip netip . Addr ) bool { return ! inRemove ( ip ) && ip . Is6 ( ) } ,
2021-02-22 20:43:35 -08:00
} ,
}
2021-12-16 11:15:45 -08:00
// Construct a fake local network environment to make this test hermetic.
// localInterfaceRoutes and hostIPs would normally come from calling interfaceRoutes,
// and localAddresses would normally come from calling interfaces.LocalAddresses.
2022-07-24 20:08:42 -07:00
var b netipx . IPSetBuilder
2021-12-16 11:15:45 -08:00
for _ , c := range [ ] string { "127.0.0.0/8" , "192.168.9.0/24" , "fe80::/32" } {
2022-07-25 20:55:44 -07:00
p := netip . MustParsePrefix ( c )
2021-12-16 11:15:45 -08:00
b . AddPrefix ( p )
}
localInterfaceRoutes , err := b . IPSet ( )
if err != nil {
t . Fatal ( err )
}
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
hostIPs := [ ] netip . Addr {
2022-07-25 20:55:44 -07:00
netip . MustParseAddr ( "127.0.0.1" ) ,
netip . MustParseAddr ( "192.168.9.39" ) ,
netip . MustParseAddr ( "fe80::1" ) ,
netip . MustParseAddr ( "fe80::437d:feff:feca:49a7" ) ,
2021-12-16 11:15:45 -08:00
}
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
localAddresses := [ ] netip . Addr {
2022-07-25 20:55:44 -07:00
netip . MustParseAddr ( "192.168.9.39" ) ,
2021-12-16 11:15:45 -08:00
}
2021-02-22 20:43:35 -08:00
for _ , test := range tests {
2022-07-25 20:55:44 -07:00
def := netip . MustParsePrefix ( test . route )
2021-12-16 11:15:45 -08:00
got , err := shrinkDefaultRoute ( def , localInterfaceRoutes , hostIPs )
2021-02-22 20:43:35 -08:00
if err != nil {
t . Fatalf ( "shrinkDefaultRoute(%q): %v" , test . route , err )
}
for _ , ip := range test . in {
2022-07-25 20:55:44 -07:00
if ! got . Contains ( netip . MustParseAddr ( ip ) ) {
2021-02-22 20:43:35 -08:00
t . Errorf ( "shrink(%q).Contains(%v) = false, want true" , test . route , ip )
}
}
for _ , ip := range test . out {
2022-07-25 20:55:44 -07:00
if got . Contains ( netip . MustParseAddr ( ip ) ) {
2021-02-22 20:43:35 -08:00
t . Errorf ( "shrink(%q).Contains(%v) = true, want false" , test . route , ip )
}
}
2021-12-16 11:15:45 -08:00
for _ , ip := range localAddresses {
2021-03-17 17:04:32 -07:00
want := test . localIPFn ( ip )
if gotContains := got . Contains ( ip ) ; gotContains != want {
t . Errorf ( "shrink(%q).Contains(%v) = %v, want %v" , test . route , ip , gotContains , want )
}
}
2021-02-22 20:43:35 -08:00
}
}
2021-03-04 12:04:31 -08:00
func TestPeerRoutes ( t * testing . T ) {
2022-07-25 20:55:44 -07:00
pp := netip . MustParsePrefix
2021-03-04 12:04:31 -08:00
tests := [ ] struct {
name string
peers [ ] wgcfg . Peer
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want [ ] netip . Prefix
2021-03-04 12:04:31 -08:00
} {
{
name : "small_v4" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.101.102.103/32" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.101.102.103/32" ) ,
} ,
} ,
{
name : "big_v4" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.101.102.103/32" ) ,
pp ( "100.101.102.104/32" ) ,
pp ( "100.101.102.105/32" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.64.0.0/10" ) ,
} ,
} ,
{
name : "has_1_v6" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "fd7a:115c:a1e0:ab12:4843:cd96:6258:b240/128" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "fd7a:115c:a1e0::/48" ) ,
} ,
} ,
{
name : "has_2_v6" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "fd7a:115c:a1e0:ab12:4843:cd96:6258:b240/128" ) ,
pp ( "fd7a:115c:a1e0:ab12:4843:cd96:6258:b241/128" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "fd7a:115c:a1e0::/48" ) ,
} ,
} ,
{
name : "big_v4_big_v6" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.101.102.103/32" ) ,
pp ( "100.101.102.104/32" ) ,
pp ( "100.101.102.105/32" ) ,
pp ( "fd7a:115c:a1e0:ab12:4843:cd96:6258:b240/128" ) ,
pp ( "fd7a:115c:a1e0:ab12:4843:cd96:6258:b241/128" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2021-03-04 12:04:31 -08:00
pp ( "100.64.0.0/10" ) ,
2022-04-13 15:41:04 -07:00
pp ( "fd7a:115c:a1e0::/48" ) ,
} ,
} ,
{
name : "output-should-be-sorted" ,
peers : [ ] wgcfg . Peer {
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2022-04-13 15:41:04 -07:00
pp ( "100.64.0.2/32" ) ,
pp ( "10.0.0.0/16" ) ,
} ,
} ,
{
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
AllowedIPs : [ ] netip . Prefix {
2022-04-13 15:41:04 -07:00
pp ( "100.64.0.1/32" ) ,
pp ( "10.0.0.0/8" ) ,
} ,
} ,
} ,
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
want : [ ] netip . Prefix {
2022-04-13 15:41:04 -07:00
pp ( "10.0.0.0/8" ) ,
pp ( "10.0.0.0/16" ) ,
pp ( "100.64.0.1/32" ) ,
pp ( "100.64.0.2/32" ) ,
2021-03-04 12:04:31 -08:00
} ,
} ,
2023-02-19 09:17:06 -07:00
{
name : "skip-unmasked-prefixes" ,
peers : [ ] wgcfg . Peer {
{
PublicKey : key . NewNode ( ) . Public ( ) ,
AllowedIPs : [ ] netip . Prefix {
pp ( "100.64.0.2/32" ) ,
pp ( "10.0.0.100/16" ) ,
} ,
} ,
} ,
want : [ ] netip . Prefix {
pp ( "100.64.0.2/32" ) ,
} ,
} ,
2021-03-04 12:04:31 -08:00
}
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
2023-02-19 09:17:06 -07:00
got := peerRoutes ( t . Logf , tt . peers , 2 )
2021-03-04 12:04:31 -08:00
if ! reflect . DeepEqual ( got , tt . want ) {
t . Errorf ( "got = %v; want %v" , got , tt . want )
}
} )
}
}
2021-04-04 21:35:52 -07:00
func TestPeerAPIBase ( t * testing . T ) {
tests := [ ] struct {
name string
nm * netmap . NetworkMap
peer * tailcfg . Node
want string
} {
{
name : "nil_netmap" ,
peer : new ( tailcfg . Node ) ,
want : "" ,
} ,
{
name : "nil_peer" ,
nm : new ( netmap . NetworkMap ) ,
want : "" ,
} ,
{
name : "self_only_4_them_both" ,
nm : & netmap . NetworkMap {
2023-09-18 07:31:34 +01:00
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.1.1/32" ) ,
} ,
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
peer : & tailcfg . Node {
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
Addresses : [ ] netip . Prefix {
2022-07-25 20:55:44 -07:00
netip . MustParsePrefix ( "100.64.1.2/32" ) ,
netip . MustParsePrefix ( "fe70::2/128" ) ,
2021-04-04 21:35:52 -07:00
} ,
2022-02-15 08:19:44 -08:00
Hostinfo : ( & tailcfg . Hostinfo {
2021-04-04 21:35:52 -07:00
Services : [ ] tailcfg . Service {
{ Proto : "peerapi4" , Port : 444 } ,
{ Proto : "peerapi6" , Port : 666 } ,
} ,
2022-02-15 08:19:44 -08:00
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
want : "http://100.64.1.2:444" ,
} ,
{
name : "self_only_6_them_both" ,
nm : & netmap . NetworkMap {
2023-09-18 07:31:34 +01:00
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "fe70::1/128" ) ,
} ,
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
peer : & tailcfg . Node {
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
Addresses : [ ] netip . Prefix {
2022-07-25 20:55:44 -07:00
netip . MustParsePrefix ( "100.64.1.2/32" ) ,
netip . MustParsePrefix ( "fe70::2/128" ) ,
2021-04-04 21:35:52 -07:00
} ,
2022-02-15 08:19:44 -08:00
Hostinfo : ( & tailcfg . Hostinfo {
2021-04-04 21:35:52 -07:00
Services : [ ] tailcfg . Service {
{ Proto : "peerapi4" , Port : 444 } ,
{ Proto : "peerapi6" , Port : 666 } ,
} ,
2022-02-15 08:19:44 -08:00
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
want : "http://[fe70::2]:666" ,
} ,
{
name : "self_both_them_only_4" ,
nm : & netmap . NetworkMap {
2023-09-18 07:31:34 +01:00
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.1.1/32" ) ,
netip . MustParsePrefix ( "fe70::1/128" ) ,
} ,
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
peer : & tailcfg . Node {
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
Addresses : [ ] netip . Prefix {
2022-07-25 20:55:44 -07:00
netip . MustParsePrefix ( "100.64.1.2/32" ) ,
netip . MustParsePrefix ( "fe70::2/128" ) ,
2021-04-04 21:35:52 -07:00
} ,
2022-02-15 08:19:44 -08:00
Hostinfo : ( & tailcfg . Hostinfo {
2021-04-04 21:35:52 -07:00
Services : [ ] tailcfg . Service {
{ Proto : "peerapi4" , Port : 444 } ,
} ,
2022-02-15 08:19:44 -08:00
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
want : "http://100.64.1.2:444" ,
} ,
{
name : "self_both_them_only_6" ,
nm : & netmap . NetworkMap {
2023-09-18 07:31:34 +01:00
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.1.1/32" ) ,
netip . MustParsePrefix ( "fe70::1/128" ) ,
} ,
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
peer : & tailcfg . Node {
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
Addresses : [ ] netip . Prefix {
2022-07-25 20:55:44 -07:00
netip . MustParsePrefix ( "100.64.1.2/32" ) ,
netip . MustParsePrefix ( "fe70::2/128" ) ,
2021-04-04 21:35:52 -07:00
} ,
2022-02-15 08:19:44 -08:00
Hostinfo : ( & tailcfg . Hostinfo {
2021-04-04 21:35:52 -07:00
Services : [ ] tailcfg . Service {
{ Proto : "peerapi6" , Port : 666 } ,
} ,
2022-02-15 08:19:44 -08:00
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
want : "http://[fe70::2]:666" ,
} ,
{
name : "self_both_them_no_peerapi_service" ,
nm : & netmap . NetworkMap {
2023-09-18 07:31:34 +01:00
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.1.1/32" ) ,
netip . MustParsePrefix ( "fe70::1/128" ) ,
} ,
} ) . View ( ) ,
2021-04-04 21:35:52 -07:00
} ,
peer : & tailcfg . Node {
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
Addresses : [ ] netip . Prefix {
2022-07-25 20:55:44 -07:00
netip . MustParsePrefix ( "100.64.1.2/32" ) ,
netip . MustParsePrefix ( "fe70::2/128" ) ,
2021-04-04 21:35:52 -07:00
} ,
} ,
want : "" ,
} ,
}
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
2023-08-18 07:57:44 -07:00
got := peerAPIBase ( tt . nm , tt . peer . View ( ) )
2021-04-04 21:35:52 -07:00
if got != tt . want {
t . Errorf ( "got %q; want %q" , got , tt . want )
}
} )
}
}
2021-04-21 12:57:48 -07:00
// panicOnUseTransport is an http.RoundTripper that panics if it is ever
// asked to perform a request. Tests install it to prove that no HTTP
// traffic is sent.
type panicOnUseTransport struct{}

// RoundTrip implements http.RoundTripper by always panicking.
func (panicOnUseTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	panic("unexpected HTTP request")
}
2023-09-20 15:34:12 +01:00
func newTestLocalBackend ( t testing . TB ) * LocalBackend {
2025-03-20 15:18:29 -07:00
return newTestLocalBackendWithSys ( t , tsd . NewSystem ( ) )
2024-10-31 08:30:11 -07:00
}
// newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System.
// If the state store or engine are not set in sys, they will be set to a new
// in-memory store and fake userspace engine, respectively.
func newTestLocalBackendWithSys ( t testing . TB , sys * tsd . System ) * LocalBackend {
2021-04-28 08:52:04 -07:00
var logf logger . Logf = logger . Discard
2024-10-31 08:30:11 -07:00
if _ , ok := sys . StateStore . GetOK ( ) ; ! ok {
sys . Set ( new ( mem . Store ) )
}
if _ , ok := sys . Engine . GetOK ( ) ; ! ok {
2025-03-19 10:47:25 -07:00
eng , err := wgengine . NewFakeUserspaceEngine ( logf , sys . Set , sys . HealthTracker ( ) , sys . UserMetricsRegistry ( ) , sys . Bus . Get ( ) )
2024-10-31 08:30:11 -07:00
if err != nil {
t . Fatalf ( "NewFakeUserspaceEngine: %v" , err )
}
t . Cleanup ( eng . Close )
sys . Set ( eng )
2021-04-28 08:52:04 -07:00
}
2023-05-03 13:57:17 -07:00
lb , err := NewLocalBackend ( logf , logid . PublicID { } , sys , 0 )
2021-04-28 08:52:04 -07:00
if err != nil {
t . Fatalf ( "NewLocalBackend: %v" , err )
}
2024-11-22 08:25:54 -06:00
t . Cleanup ( lb . Shutdown )
2023-09-20 15:34:12 +01:00
return lb
}
// Issue 1573: don't generate a machine key if we don't want to be running.
func TestLazyMachineKeyGeneration ( t * testing . T ) {
tstest . Replace ( t , & panicOnMachineKeyGeneration , func ( ) bool { return true } )
2021-04-28 08:52:04 -07:00
2023-09-20 15:34:12 +01:00
lb := newTestLocalBackend ( t )
2021-04-28 08:52:04 -07:00
lb . SetHTTPTestClient ( & http . Client {
Transport : panicOnUseTransport { } , // validate we don't send HTTP requests
} )
2022-11-09 10:58:10 +05:00
if err := lb . Start ( ipn . Options { } ) ; err != nil {
2021-04-28 08:52:04 -07:00
t . Fatalf ( "Start: %v" , err )
}
// Give the controlclient package goroutines (if they're
// accidentally started) extra time to schedule and run (and thus
// hit panicOnUseTransport).
time . Sleep ( 500 * time . Millisecond )
}
2021-04-29 10:26:53 -07:00
2024-04-16 14:53:56 -04:00
func TestZeroExitNodeViaLocalAPI ( t * testing . T ) {
lb := newTestLocalBackend ( t )
2025-07-07 17:04:07 -05:00
user := & ipnauth . TestActor { }
2024-04-16 14:53:56 -04:00
// Give it an initial exit node in use.
2025-07-07 17:04:07 -05:00
if _ , err := lb . EditPrefsAs ( & ipn . MaskedPrefs {
2024-04-16 14:53:56 -04:00
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "foo" ,
} ,
2025-07-07 17:04:07 -05:00
} , user ) ; err != nil {
2024-04-16 14:53:56 -04:00
t . Fatalf ( "enabling first exit node: %v" , err )
}
// SetUseExitNodeEnabled(false) "remembers" the prior exit node.
2025-07-07 17:04:07 -05:00
if _ , err := lb . SetUseExitNodeEnabled ( user , false ) ; err != nil {
2024-04-16 14:53:56 -04:00
t . Fatal ( "expected failure" )
}
// Zero the exit node
2025-07-07 17:04:07 -05:00
pv , err := lb . EditPrefsAs ( & ipn . MaskedPrefs {
2024-04-16 14:53:56 -04:00
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "" ,
} ,
2025-07-07 17:04:07 -05:00
} , user )
2024-04-16 14:53:56 -04:00
if err != nil {
t . Fatalf ( "enabling first exit node: %v" , err )
}
// We just set the internal exit node to the empty string, so InternalExitNodePrior should
// also be zero'd
if got , want := pv . InternalExitNodePrior ( ) , tailcfg . StableNodeID ( "" ) ; got != want {
t . Fatalf ( "unexpected InternalExitNodePrior %q, want: %q" , got , want )
}
}
2024-04-03 10:51:51 -07:00
func TestSetUseExitNodeEnabled ( t * testing . T ) {
lb := newTestLocalBackend ( t )
2025-07-07 17:04:07 -05:00
user := & ipnauth . TestActor { }
2024-04-03 10:51:51 -07:00
// Can't turn it on if it never had an old value.
2025-07-07 17:04:07 -05:00
if _ , err := lb . SetUseExitNodeEnabled ( user , true ) ; err == nil {
2024-04-03 10:51:51 -07:00
t . Fatal ( "expected success" )
}
// But we can turn it off when it's already off.
2025-07-07 17:04:07 -05:00
if _ , err := lb . SetUseExitNodeEnabled ( user , false ) ; err != nil {
2024-04-03 10:51:51 -07:00
t . Fatal ( "expected failure" )
}
// Give it an initial exit node in use.
2025-07-07 17:04:07 -05:00
if _ , err := lb . EditPrefsAs ( & ipn . MaskedPrefs {
2024-04-03 10:51:51 -07:00
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "foo" ,
} ,
2025-07-07 17:04:07 -05:00
} , user ) ; err != nil {
2024-04-03 10:51:51 -07:00
t . Fatalf ( "enabling first exit node: %v" , err )
}
// Now turn off that exit node.
2025-07-07 17:04:07 -05:00
if prefs , err := lb . SetUseExitNodeEnabled ( user , false ) ; err != nil {
2024-04-03 10:51:51 -07:00
t . Fatal ( "expected failure" )
} else {
if g , w := prefs . ExitNodeID ( ) , tailcfg . StableNodeID ( "" ) ; g != w {
t . Fatalf ( "unexpected exit node ID %q; want %q" , g , w )
}
2024-04-16 14:53:56 -04:00
if g , w := prefs . InternalExitNodePrior ( ) , tailcfg . StableNodeID ( "foo" ) ; g != w {
2024-04-03 10:51:51 -07:00
t . Fatalf ( "unexpected exit node prior %q; want %q" , g , w )
}
}
// And turn it back on.
2025-07-07 17:04:07 -05:00
if prefs , err := lb . SetUseExitNodeEnabled ( user , true ) ; err != nil {
2024-04-03 10:51:51 -07:00
t . Fatal ( "expected failure" )
} else {
if g , w := prefs . ExitNodeID ( ) , tailcfg . StableNodeID ( "foo" ) ; g != w {
t . Fatalf ( "unexpected exit node ID %q; want %q" , g , w )
}
2024-04-16 14:53:56 -04:00
if g , w := prefs . InternalExitNodePrior ( ) , tailcfg . StableNodeID ( "foo" ) ; g != w {
2024-04-03 10:51:51 -07:00
t . Fatalf ( "unexpected exit node prior %q; want %q" , g , w )
}
}
// Verify we block setting an Internal field.
2025-07-07 17:04:07 -05:00
if _ , err := lb . EditPrefsAs ( & ipn . MaskedPrefs {
2024-04-03 10:51:51 -07:00
InternalExitNodePriorSet : true ,
2025-07-07 17:04:07 -05:00
} , user ) ; err == nil {
2024-04-03 10:51:51 -07:00
t . Fatalf ( "unexpected success; want an error trying to set an internal field" )
}
}
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
// makeExitNode returns a test peer usable as an exit node: it advertises
// exit routes, is marked as suggestible, and carries capability 26
// (presumably a capability version required for exit node suggestions —
// TODO(review): confirm against withCap's definition). Additional peer
// options may be appended via opts.
func makeExitNode(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView {
	base := []peerOptFunc{withCap(26), withSuggest(), withExitRoutes()}
	return makePeer(id, append(base, opts...)...)
}
func TestConfigureExitNode ( t * testing . T ) {
controlURL := "https://localhost:1/"
exitNode1 := makeExitNode ( 1 , withName ( "node-1" ) , withDERP ( 1 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.1/32" ) ) )
exitNode2 := makeExitNode ( 2 , withName ( "node-2" ) , withDERP ( 2 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.2/32" ) ) )
selfNode := makeExitNode ( 3 , withName ( "node-3" ) , withDERP ( 1 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.3/32" ) ) )
clientNetmap := buildNetmapWithPeers ( selfNode , exitNode1 , exitNode2 )
report := & netcheck . Report {
RegionLatency : map [ int ] time . Duration {
1 : 5 * time . Millisecond ,
2 : 10 * time . Millisecond ,
} ,
PreferredDERP : 1 ,
}
tests := [ ] struct {
2025-07-22 13:54:28 -07:00
name string
prefs ipn . Prefs
netMap * netmap . NetworkMap
report * netcheck . Report
changePrefs * ipn . MaskedPrefs
useExitNodeEnabled * bool
exitNodeIDPolicy * tailcfg . StableNodeID
exitNodeIPPolicy * netip . Addr
exitNodeAllowedIDs [ ] tailcfg . StableNodeID // nil if all IDs are allowed for auto exit nodes
exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true
wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs]
wantPrefs ipn . Prefs
wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled]
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID tailcfg . StableNodeID
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} {
{
name : "exit-node-id-via-prefs" , // set exit node ID via prefs
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { ExitNodeID : exitNode1 . StableID ( ) } ,
ExitNodeIDSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "exit-node-ip-via-prefs" , // set exit node IP via prefs (should be resolved to an ID)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { ExitNodeIP : exitNode1 . Addresses ( ) . At ( 0 ) . Addr ( ) } ,
ExitNodeIPSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/any" , // set auto exit node via prefs
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "any" } ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/set-exit-node-id-via-prefs" , // setting exit node ID explicitly should disable auto exit node
prefs : ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : "any" ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { ExitNodeID : exitNode2 . StableID ( ) } ,
ExitNodeIDSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) ,
AutoExitNode : "" , // should be unset
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/any/no-report" , // set auto exit node via prefs, but no report means we can't resolve the exit node ID
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "any" } ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : unresolvedExitNodeID , // cannot resolve; traffic will be dropped
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/any/no-netmap" , // similarly, but without a netmap (no exit node should be selected)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "any" } ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : unresolvedExitNodeID , // cannot resolve; traffic will be dropped
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/foo" , // set auto exit node via prefs with an unknown/unsupported expression
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "foo" } ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // unknown exit node expressions should work as "any"
AutoExitNode : "foo" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/off" , // toggle the exit node off after it was set to "any"
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "any" } ,
AutoExitNodeSet : true ,
} ,
useExitNodeEnabled : ptr . To ( false ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : "" ,
AutoExitNode : "" ,
InternalExitNodePrior : "auto:any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-exit-node-via-prefs/on" , // toggle the exit node on
prefs : ipn . Prefs {
ControlURL : controlURL ,
InternalExitNodePrior : "auto:any" ,
} ,
netMap : clientNetmap ,
report : report ,
useExitNodeEnabled : ptr . To ( true ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
AutoExitNode : "any" ,
InternalExitNodePrior : "auto:any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "id-via-policy" , // set exit node ID via syspolicy
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
exitNodeIDPolicy : ptr . To ( exitNode1 . StableID ( ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "id-via-policy/cannot-override-via-prefs/by-id" , // syspolicy should take precedence over prefs
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
exitNodeIDPolicy : ptr . To ( exitNode1 . StableID ( ) ) ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
ExitNodeID : exitNode2 . StableID ( ) , // this should be ignored
} ,
ExitNodeIDSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-22 13:54:28 -07:00
wantChangePrefsErr : errManagedByPolicy ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "id-via-policy/cannot-override-via-prefs/by-ip" , // syspolicy should take precedence over prefs
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
exitNodeIDPolicy : ptr . To ( exitNode1 . StableID ( ) ) ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
ExitNodeIP : exitNode2 . Addresses ( ) . At ( 0 ) . Addr ( ) , // this should be ignored
} ,
ExitNodeIPSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-22 13:54:28 -07:00
wantChangePrefsErr : errManagedByPolicy ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "id-via-policy/cannot-override-via-prefs/by-auto-expr" , // syspolicy should take precedence over prefs
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
exitNodeIDPolicy : ptr . To ( exitNode1 . StableID ( ) ) ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AutoExitNode : "any" , // this should be ignored
} ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
wantChangePrefsErr : errManagedByPolicy ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "ip-via-policy" , // set exit node IP via syspolicy (should be resolved to an ID)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
exitNodeIPPolicy : ptr . To ( exitNode2 . Addresses ( ) . At ( 0 ) . Addr ( ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-any-via-policy" , // set auto exit node via syspolicy (an exit node should be selected)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-any-via-policy/no-report" , // set auto exit node via syspolicy without a netcheck report (no exit node should be selected)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : nil ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : unresolvedExitNodeID ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
{
name : "auto-any-via-policy/no-netmap" , // similarly, but without a netmap (no exit node should be selected)
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : nil ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : unresolvedExitNodeID ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
2025-07-03 20:32:30 -05:00
{
name : "auto-any-via-policy/no-netmap/with-existing" , // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID
prefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) , // should be retained
} ,
netMap : nil ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowedIDs : nil , // not configured, so all exit node IDs are implicitly allowed
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
2025-07-03 20:32:30 -05:00
} ,
{
name : "auto-any-via-policy/no-netmap/with-allowed-existing" , // same, but now with a syspolicy setting that explicitly allows the existing exit node ID
prefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) , // should be retained
} ,
netMap : nil ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowedIDs : [ ] tailcfg . StableNodeID {
exitNode2 . StableID ( ) , // the current exit node ID is allowed
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) ,
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
2025-07-03 20:32:30 -05:00
} ,
{
name : "auto-any-via-policy/no-netmap/with-disallowed-existing" , // same, but now with a syspolicy setting that does not allow the existing exit node ID
prefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) , // not allowed by [syspolicy.AllowedSuggestedExitNodes]
} ,
netMap : nil ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowedIDs : [ ] tailcfg . StableNodeID {
exitNode1 . StableID ( ) , // a different exit node ID; the current one is not allowed
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : unresolvedExitNodeID , // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
2025-07-03 20:32:30 -05:00
} ,
{
name : "auto-any-via-policy/with-netmap/with-allowed-existing" , // same, but now with a syspolicy setting that does not allow the existing exit node ID
prefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // not allowed by [syspolicy.AllowedSuggestedExitNodes]
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowedIDs : [ ] tailcfg . StableNodeID {
exitNode2 . StableID ( ) , // a different exit node ID; the current one is not allowed
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) , // we have a netmap; switch to the best allowed exit node
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
2025-07-03 20:32:30 -05:00
} ,
{
name : "auto-any-via-policy/with-netmap/switch-to-better" , // if all exit nodes are allowed, switch to the best one once we have a netmap
prefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // switch to the best exit node
AutoExitNode : "any" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-03 20:32:30 -05:00
} ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
{
name : "auto-foo-via-policy" , // set auto exit node via syspolicy with an unknown/unsupported expression
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:foo" ) ) ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // unknown exit node expressions should work as "any"
AutoExitNode : "foo" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
2025-07-08 18:35:32 -05:00
{
name : "auto-foo-via-edit-prefs" , // set auto exit node via EditPrefs with an unknown/unsupported expression
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs { AutoExitNode : "foo" } ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // unknown exit node expressions should work as "any"
AutoExitNode : "foo" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-08 18:35:32 -05:00
} ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
{
name : "auto-any-via-policy/toggle-off" , // cannot toggle off the exit node if it was set via syspolicy
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
2025-07-07 17:04:07 -05:00
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
useExitNodeEnabled : ptr . To ( false ) , // should fail with an error
wantExitNodeToggleErr : errManagedByPolicy ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // still enforced by the policy setting
AutoExitNode : "any" ,
2025-07-07 17:04:07 -05:00
InternalExitNodePrior : "" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} ,
2025-07-08 16:08:28 -05:00
{
name : "auto-any-via-policy/allow-override/change" , // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride]
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowOverride : true , // allow changing the exit node
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
ExitNodeID : exitNode2 . StableID ( ) , // change the exit node ID
} ,
ExitNodeIDSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode2 . StableID ( ) , // overridden by user
AutoExitNode : "" , // cleared, as we are setting the exit node ID explicitly
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode2 . StableID ( ) ,
2025-07-08 16:08:28 -05:00
} ,
{
name : "auto-any-via-policy/allow-override/clear" , // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride]
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowOverride : true , // allow changing, but not disabling, the exit node
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
ExitNodeID : "" , // clearing the exit node ID disables the exit node and should not be allowed
} ,
ExitNodeIDSet : true ,
} ,
wantChangePrefsErr : errManagedByPolicy , // edit prefs should fail with an error
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // still enforced by the policy setting
AutoExitNode : "any" ,
InternalExitNodePrior : "" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-08 16:08:28 -05:00
} ,
{
name : "auto-any-via-policy/allow-override/toggle-off" , // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride]
prefs : ipn . Prefs {
ControlURL : controlURL ,
} ,
netMap : clientNetmap ,
report : report ,
exitNodeIDPolicy : ptr . To ( tailcfg . StableNodeID ( "auto:any" ) ) ,
exitNodeAllowOverride : true , // allow changing, but not disabling, the exit node
useExitNodeEnabled : ptr . To ( false ) , // should fail with an error
wantExitNodeToggleErr : errManagedByPolicy ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
ExitNodeID : exitNode1 . StableID ( ) , // still enforced by the policy setting
AutoExitNode : "any" ,
InternalExitNodePrior : "" ,
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-08 16:08:28 -05:00
} ,
{
name : "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node" ,
prefs : ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : ipn . AnyExitNode ,
} ,
netMap : nil , // no netmap; exit node cannot be resolved
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AutoExitNode : "" , // clear the auto exit node
} ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : "" , // cleared
ExitNodeID : "" , // has never been resolved, so it should be cleared as well
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : "" ,
2025-07-08 16:08:28 -05:00
} ,
{
name : "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node" ,
prefs : ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : ipn . AnyExitNode ,
} ,
netMap : clientNetmap , // has a netmap; exit node will be resolved
report : report ,
changePrefs : & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AutoExitNode : "" , // clear the auto exit node
} ,
AutoExitNodeSet : true ,
} ,
wantPrefs : ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : "" , // cleared
ExitNodeID : exitNode1 . StableID ( ) , // a resolved exit node ID should be retained
} ,
2025-07-23 11:50:42 -07:00
wantHostinfoExitNodeID : exitNode1 . StableID ( ) ,
2025-07-08 16:08:28 -05:00
} ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
syspolicy . RegisterWellKnownSettingsForTest ( t )
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
// Configure policy settings, if any.
2025-07-03 20:32:30 -05:00
store := source . NewTestStore ( t )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
if tt . exitNodeIDPolicy != nil {
2025-07-03 20:32:30 -05:00
store . SetStrings ( source . TestSettingOf ( syspolicy . ExitNodeID , string ( * tt . exitNodeIDPolicy ) ) )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
if tt . exitNodeIPPolicy != nil {
2025-07-03 20:32:30 -05:00
store . SetStrings ( source . TestSettingOf ( syspolicy . ExitNodeIP , tt . exitNodeIPPolicy . String ( ) ) )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
2025-07-03 20:32:30 -05:00
if tt . exitNodeAllowedIDs != nil {
store . SetStringLists ( source . TestSettingOf ( syspolicy . AllowedSuggestedExitNodes , toStrings ( tt . exitNodeAllowedIDs ) ) )
}
2025-07-08 16:08:28 -05:00
if tt . exitNodeAllowOverride {
store . SetBooleans ( source . TestSettingOf ( syspolicy . AllowExitNodeOverride , true ) )
}
2025-07-03 20:32:30 -05:00
if store . IsEmpty ( ) {
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
// No syspolicy settings, so don't register a store.
// This allows the test to run in parallel with other tests.
t . Parallel ( )
2025-07-03 20:32:30 -05:00
} else {
// Register the store for syspolicy settings to make them available to the LocalBackend.
syspolicy . MustRegisterStoreForTest ( t , "TestStore" , setting . DeviceScope , store )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
// Create a new LocalBackend with the given prefs.
// Any syspolicy settings will be applied to the initial prefs.
lb := newTestLocalBackend ( t )
lb . SetPrefsForTest ( tt . prefs . Clone ( ) )
// Then set the netcheck report and netmap, if any.
if tt . report != nil {
lb . MagicConn ( ) . SetLastNetcheckReportForTest ( t . Context ( ) , tt . report )
}
if tt . netMap != nil {
lb . SetControlClientStatus ( lb . cc , controlclient . Status { NetMap : tt . netMap } )
}
2025-07-07 17:04:07 -05:00
user := & ipnauth . TestActor { }
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
// If we have a changePrefs, apply it.
if tt . changePrefs != nil {
2025-07-07 17:04:07 -05:00
_ , err := lb . EditPrefsAs ( tt . changePrefs , user )
checkError ( t , err , tt . wantChangePrefsErr , true )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
// If we need to flip exit node toggle on or off, do it.
if tt . useExitNodeEnabled != nil {
2025-07-07 17:04:07 -05:00
_ , err := lb . SetUseExitNodeEnabled ( user , * tt . useExitNodeEnabled )
checkError ( t , err , tt . wantExitNodeToggleErr , true )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
}
// Now check the prefs.
opts := [ ] cmp . Option {
cmpopts . EquateComparable ( netip . Addr { } , netip . Prefix { } ) ,
}
if diff := cmp . Diff ( & tt . wantPrefs , lb . Prefs ( ) . AsStruct ( ) , opts ... ) ; diff != "" {
t . Errorf ( "Prefs(+got -want): %v" , diff )
}
2025-07-22 13:54:28 -07:00
// And check Hostinfo.
2025-07-23 11:50:42 -07:00
if got := lb . hostinfo . ExitNodeID ; got != tt . wantHostinfoExitNodeID {
t . Errorf ( "Hostinfo.ExitNodeID got %s, want %s" , got , tt . wantHostinfoExitNodeID )
2025-07-22 13:54:28 -07:00
}
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
} )
}
}
2025-07-08 16:08:28 -05:00
func TestPrefsChangeDisablesExitNode ( t * testing . T ) {
tests := [ ] struct {
name string
netMap * netmap . NetworkMap
prefs ipn . Prefs
change ipn . MaskedPrefs
wantDisablesExitNode bool
} {
{
name : "has-exit-node-id/no-change" ,
prefs : ipn . Prefs {
ExitNodeID : "test-exit-node" ,
} ,
change : ipn . MaskedPrefs { } ,
wantDisablesExitNode : false ,
} ,
{
name : "has-exit-node-ip/no-change" ,
prefs : ipn . Prefs {
ExitNodeIP : netip . MustParseAddr ( "100.100.1.1" ) ,
} ,
change : ipn . MaskedPrefs { } ,
wantDisablesExitNode : false ,
} ,
{
name : "has-auto-exit-node/no-change" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs { } ,
wantDisablesExitNode : false ,
} ,
{
name : "has-exit-node-id/non-exit-node-change" ,
prefs : ipn . Prefs {
ExitNodeID : "test-exit-node" ,
} ,
change : ipn . MaskedPrefs {
WantRunningSet : true ,
HostnameSet : true ,
ExitNodeAllowLANAccessSet : true ,
Prefs : ipn . Prefs {
WantRunning : true ,
Hostname : "test-hostname" ,
ExitNodeAllowLANAccess : true ,
} ,
} ,
wantDisablesExitNode : false ,
} ,
{
name : "has-exit-node-ip/non-exit-node-change" ,
prefs : ipn . Prefs {
ExitNodeIP : netip . MustParseAddr ( "100.100.1.1" ) ,
} ,
change : ipn . MaskedPrefs {
WantRunningSet : true ,
RouteAllSet : true ,
ShieldsUpSet : true ,
Prefs : ipn . Prefs {
WantRunning : false ,
RouteAll : false ,
ShieldsUp : true ,
} ,
} ,
wantDisablesExitNode : false ,
} ,
{
name : "has-auto-exit-node/non-exit-node-change" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs {
CorpDNSSet : true ,
RouteAllSet : true ,
ExitNodeAllowLANAccessSet : true ,
Prefs : ipn . Prefs {
CorpDNS : true ,
RouteAll : false ,
ExitNodeAllowLANAccess : true ,
} ,
} ,
wantDisablesExitNode : false ,
} ,
{
name : "has-exit-node-id/change-exit-node-id" ,
prefs : ipn . Prefs {
ExitNodeID : "exit-node-1" ,
} ,
change : ipn . MaskedPrefs {
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "exit-node-2" ,
} ,
} ,
wantDisablesExitNode : false , // changing the exit node ID does not disable it
} ,
{
name : "has-exit-node-id/enable-auto-exit-node" ,
prefs : ipn . Prefs {
ExitNodeID : "exit-node-1" ,
} ,
change : ipn . MaskedPrefs {
AutoExitNodeSet : true ,
Prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
} ,
wantDisablesExitNode : false , // changing the exit node ID does not disable it
} ,
{
name : "has-exit-node-id/clear-exit-node-id" ,
prefs : ipn . Prefs {
ExitNodeID : "exit-node-1" ,
} ,
change : ipn . MaskedPrefs {
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "" ,
} ,
} ,
wantDisablesExitNode : true , // clearing the exit node ID disables it
} ,
{
name : "has-auto-exit-node/clear-exit-node-id" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs {
ExitNodeIDSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "" ,
} ,
} ,
wantDisablesExitNode : true , // clearing the exit node ID disables auto exit node as well...
} ,
{
name : "has-auto-exit-node/clear-exit-node-id/but-keep-auto-exit-node" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs {
ExitNodeIDSet : true ,
AutoExitNodeSet : true ,
Prefs : ipn . Prefs {
ExitNodeID : "" ,
AutoExitNode : ipn . AnyExitNode ,
} ,
} ,
wantDisablesExitNode : false , // ... unless we explicitly keep the auto exit node enabled
} ,
{
name : "has-auto-exit-node/clear-exit-node-ip" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs {
ExitNodeIPSet : true ,
Prefs : ipn . Prefs {
ExitNodeIP : netip . Addr { } ,
} ,
} ,
wantDisablesExitNode : false , // auto exit node is still enabled
} ,
{
name : "has-auto-exit-node/clear-auto-exit-node" ,
prefs : ipn . Prefs {
AutoExitNode : ipn . AnyExitNode ,
} ,
change : ipn . MaskedPrefs {
AutoExitNodeSet : true ,
Prefs : ipn . Prefs {
AutoExitNode : "" ,
} ,
} ,
wantDisablesExitNode : true , // clearing the auto exit while the exit node ID is unresolved disables exit node usage
} ,
}
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
lb := newTestLocalBackend ( t )
if tt . netMap != nil {
lb . SetControlClientStatus ( lb . cc , controlclient . Status { NetMap : tt . netMap } )
}
// Set the initial prefs via SetPrefsForTest
// to apply necessary adjustments.
lb . SetPrefsForTest ( tt . prefs . Clone ( ) )
initialPrefs := lb . Prefs ( )
// Check whether changeDisablesExitNodeLocked correctly identifies the change.
if got := lb . changeDisablesExitNodeLocked ( initialPrefs , & tt . change ) ; got != tt . wantDisablesExitNode {
t . Errorf ( "disablesExitNode: got %v; want %v" , got , tt . wantDisablesExitNode )
}
// Apply the change and check if it the actual behavior matches the expectation.
gotPrefs , err := lb . EditPrefsAs ( & tt . change , & ipnauth . TestActor { } )
if err != nil {
t . Fatalf ( "EditPrefsAs failed: %v" , err )
}
gotDisabledExitNode := initialPrefs . ExitNodeID ( ) != "" && gotPrefs . ExitNodeID ( ) == ""
if gotDisabledExitNode != tt . wantDisablesExitNode {
t . Errorf ( "disabledExitNode: got %v; want %v" , gotDisabledExitNode , tt . wantDisablesExitNode )
}
} )
}
}
2025-07-09 13:01:32 -05:00
func TestExitNodeNotifyOrder ( t * testing . T ) {
const controlURL = "https://localhost:1/"
report := & netcheck . Report {
RegionLatency : map [ int ] time . Duration {
1 : 5 * time . Millisecond ,
2 : 10 * time . Millisecond ,
} ,
PreferredDERP : 1 ,
}
exitNode1 := makeExitNode ( 1 , withName ( "node-1" ) , withDERP ( 1 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.1/32" ) ) )
exitNode2 := makeExitNode ( 2 , withName ( "node-2" ) , withDERP ( 2 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.2/32" ) ) )
selfNode := makeExitNode ( 3 , withName ( "node-3" ) , withDERP ( 1 ) , withAddresses ( netip . MustParsePrefix ( "100.64.1.3/32" ) ) )
clientNetmap := buildNetmapWithPeers ( selfNode , exitNode1 , exitNode2 )
lb := newTestLocalBackend ( t )
lb . sys . MagicSock . Get ( ) . SetLastNetcheckReportForTest ( lb . ctx , report )
lb . SetPrefsForTest ( & ipn . Prefs {
ControlURL : controlURL ,
AutoExitNode : ipn . AnyExitNode ,
} )
nw := newNotificationWatcher ( t , lb , ipnauth . Self )
// Updating the netmap should trigger both a netmap notification
// and an exit node ID notification (since an exit node is selected).
// The netmap notification should be sent first.
nw . watch ( 0 , [ ] wantedNotification {
wantNetmapNotify ( clientNetmap ) ,
wantExitNodeIDNotify ( exitNode1 . StableID ( ) ) ,
} )
lb . SetControlClientStatus ( lb . cc , controlclient . Status { NetMap : clientNetmap } )
nw . check ( )
}
// wantNetmapNotify returns a wantedNotification that matches a notification
// carrying exactly the given netmap (compared by pointer identity).
func wantNetmapNotify(want *netmap.NetworkMap) wantedNotification {
	return wantedNotification{
		name: "Netmap",
		cond: func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool {
			return n.NetMap == want
		},
	}
}
// wantExitNodeIDNotify returns a wantedNotification that matches a
// notification whose prefs carry the given exit node ID.
func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification {
	return wantedNotification{
		name: fmt.Sprintf("ExitNodeID-%s", want),
		cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool {
			return n.Prefs != nil && n.Prefs.Valid() && n.Prefs.ExitNodeID() == want
		},
	}
}
2021-10-14 16:40:06 -04:00
func TestInternalAndExternalInterfaces ( t * testing . T ) {
type interfacePrefix struct {
2024-04-27 21:18:18 -07:00
i netmon . Interface
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
pfx netip . Prefix
2021-10-14 16:40:06 -04:00
}
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
masked := func ( ips ... interfacePrefix ) ( pfxs [ ] netip . Prefix ) {
2021-10-14 16:40:06 -04:00
for _ , ip := range ips {
pfxs = append ( pfxs , ip . pfx . Masked ( ) )
}
return pfxs
}
2024-04-27 21:18:18 -07:00
iList := func ( ips ... interfacePrefix ) ( il netmon . InterfaceList ) {
2021-10-14 16:40:06 -04:00
for _ , ip := range ips {
il = append ( il , ip . i )
}
return il
}
newInterface := func ( name , pfx string , wsl2 , loopback bool ) interfacePrefix {
2022-07-25 20:55:44 -07:00
ippfx := netip . MustParsePrefix ( pfx )
2024-04-27 21:18:18 -07:00
ip := netmon . Interface {
2021-10-14 16:40:06 -04:00
Interface : & net . Interface { } ,
AltAddrs : [ ] net . Addr {
2022-07-24 20:08:42 -07:00
netipx . PrefixIPNet ( ippfx ) ,
2021-10-14 16:40:06 -04:00
} ,
}
if loopback {
ip . Flags = net . FlagLoopback
}
if wsl2 {
ip . HardwareAddr = [ ] byte { 0x00 , 0x15 , 0x5d , 0x00 , 0x00 , 0x00 }
}
return interfacePrefix { i : ip , pfx : ippfx }
}
var (
en0 = newInterface ( "en0" , "10.20.2.5/16" , false , false )
en1 = newInterface ( "en1" , "192.168.1.237/24" , false , false )
wsl = newInterface ( "wsl" , "192.168.5.34/24" , true , false )
loopback = newInterface ( "lo0" , "127.0.0.1/8" , false , true )
)
tests := [ ] struct {
name string
goos string
2024-04-27 21:18:18 -07:00
il netmon . InterfaceList
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-25 21:14:09 -07:00
wantInt [ ] netip . Prefix
wantExt [ ] netip . Prefix
2021-10-14 16:40:06 -04:00
} {
{
name : "single-interface" ,
goos : "linux" ,
il : iList (
en0 ,
loopback ,
) ,
wantInt : masked ( loopback ) ,
wantExt : masked ( en0 ) ,
} ,
{
name : "multiple-interfaces" ,
goos : "linux" ,
il : iList (
en0 ,
en1 ,
wsl ,
loopback ,
) ,
wantInt : masked ( loopback ) ,
wantExt : masked ( en0 , en1 , wsl ) ,
} ,
{
name : "wsl2" ,
goos : "windows" ,
il : iList (
en0 ,
en1 ,
wsl ,
loopback ,
) ,
wantInt : masked ( loopback , wsl ) ,
wantExt : masked ( en0 , en1 ) ,
} ,
}
for _ , tc := range tests {
t . Run ( tc . name , func ( t * testing . T ) {
gotInt , gotExt , err := internalAndExternalInterfacesFrom ( tc . il , tc . goos )
if err != nil {
t . Fatal ( err )
}
if ! reflect . DeepEqual ( gotInt , tc . wantInt ) {
t . Errorf ( "unexpected internal prefixes\ngot %v\nwant %v" , gotInt , tc . wantInt )
}
if ! reflect . DeepEqual ( gotExt , tc . wantExt ) {
t . Errorf ( "unexpected external prefixes\ngot %v\nwant %v" , gotExt , tc . wantExt )
}
} )
}
}
2022-11-02 13:13:26 -07:00
func TestPacketFilterPermitsUnlockedNodes ( t * testing . T ) {
tests := [ ] struct {
name string
peers [ ] * tailcfg . Node
filter [ ] filter . Match
want bool
} {
{
name : "empty" ,
want : false ,
} ,
{
name : "no-unsigned" ,
peers : [ ] * tailcfg . Node {
{ ID : 1 } ,
} ,
want : false ,
} ,
{
name : "unsigned-good" ,
peers : [ ] * tailcfg . Node {
{ ID : 1 , UnsignedPeerAPIOnly : true } ,
} ,
want : false ,
} ,
{
name : "unsigned-bad" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
UnsignedPeerAPIOnly : true ,
AllowedIPs : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.0.0/32" ) ,
} ,
} ,
} ,
filter : [ ] filter . Match {
{
Srcs : [ ] netip . Prefix { netip . MustParsePrefix ( "100.64.0.0/32" ) } ,
Dsts : [ ] filter . NetPortRange {
{
Net : netip . MustParsePrefix ( "100.99.0.0/32" ) ,
} ,
} ,
} ,
} ,
want : true ,
} ,
{
name : "unsigned-bad-src-is-superset" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
UnsignedPeerAPIOnly : true ,
AllowedIPs : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.0.0/32" ) ,
} ,
} ,
} ,
filter : [ ] filter . Match {
{
Srcs : [ ] netip . Prefix { netip . MustParsePrefix ( "100.64.0.0/24" ) } ,
Dsts : [ ] filter . NetPortRange {
{
Net : netip . MustParsePrefix ( "100.99.0.0/32" ) ,
} ,
} ,
} ,
} ,
want : true ,
} ,
{
name : "unsigned-okay-because-no-dsts" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
UnsignedPeerAPIOnly : true ,
AllowedIPs : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.0.0/32" ) ,
} ,
} ,
} ,
filter : [ ] filter . Match {
{
Srcs : [ ] netip . Prefix { netip . MustParsePrefix ( "100.64.0.0/32" ) } ,
Caps : [ ] filter . CapMatch {
{
Dst : netip . MustParsePrefix ( "100.99.0.0/32" ) ,
Cap : "foo" ,
} ,
} ,
} ,
} ,
want : false ,
} ,
}
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
2023-09-17 02:13:52 -05:00
if got := packetFilterPermitsUnlockedNodes ( peersMap ( nodeViews ( tt . peers ) ) , tt . filter ) ; got != tt . want {
2022-11-02 13:13:26 -07:00
t . Errorf ( "got %v, want %v" , got , tt . want )
}
} )
}
}
2022-11-28 18:47:05 -08:00
2024-04-03 10:55:28 -04:00
func TestStatusPeerCapabilities ( t * testing . T ) {
tests := [ ] struct {
name string
peers [ ] tailcfg . NodeView
expectedPeerCapabilities map [ tailcfg . StableNodeID ] [ ] tailcfg . NodeCapability
expectedPeerCapMap map [ tailcfg . StableNodeID ] tailcfg . NodeCapMap
} {
{
name : "peers-with-capabilities" ,
peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
ID : 1 ,
StableID : "foo" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 1 ) ,
2024-04-03 10:55:28 -04:00
IsWireGuardOnly : true ,
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
Capabilities : [ ] tailcfg . NodeCapability { tailcfg . CapabilitySSH } ,
CapMap : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilitySSH : nil ,
} ) ,
} ) . View ( ) ,
( & tailcfg . Node {
ID : 2 ,
StableID : "bar" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 2 ) ,
2024-04-03 10:55:28 -04:00
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
Capabilities : [ ] tailcfg . NodeCapability { tailcfg . CapabilityAdmin } ,
CapMap : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilityAdmin : { ` { "test": "true} ` } ,
} ) ,
} ) . View ( ) ,
2025-07-01 09:28:48 -07:00
( & tailcfg . Node {
ID : 3 ,
StableID : "baz" ,
Key : makeNodeKeyFromID ( 3 ) ,
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
Capabilities : [ ] tailcfg . NodeCapability { tailcfg . CapabilityOwner } ,
CapMap : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilityOwner : nil ,
} ) ,
} ) . View ( ) ,
2024-04-03 10:55:28 -04:00
} ,
expectedPeerCapabilities : map [ tailcfg . StableNodeID ] [ ] tailcfg . NodeCapability {
tailcfg . StableNodeID ( "foo" ) : { tailcfg . CapabilitySSH } ,
tailcfg . StableNodeID ( "bar" ) : { tailcfg . CapabilityAdmin } ,
2025-07-01 09:28:48 -07:00
tailcfg . StableNodeID ( "baz" ) : { tailcfg . CapabilityOwner } ,
2024-04-03 10:55:28 -04:00
} ,
expectedPeerCapMap : map [ tailcfg . StableNodeID ] tailcfg . NodeCapMap {
tailcfg . StableNodeID ( "foo" ) : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilitySSH : nil ,
} ) ,
tailcfg . StableNodeID ( "bar" ) : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilityAdmin : { ` { "test": "true} ` } ,
} ) ,
2025-07-01 09:28:48 -07:00
tailcfg . StableNodeID ( "baz" ) : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
tailcfg . CapabilityOwner : nil ,
} ) ,
2024-04-03 10:55:28 -04:00
} ,
} ,
{
name : "peers-without-capabilities" ,
peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
ID : 1 ,
StableID : "foo" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 1 ) ,
2024-04-03 10:55:28 -04:00
IsWireGuardOnly : true ,
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
} ) . View ( ) ,
( & tailcfg . Node {
ID : 2 ,
StableID : "bar" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 2 ) ,
2024-04-03 10:55:28 -04:00
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
} ) . View ( ) ,
} ,
} ,
}
b := newTestLocalBackend ( t )
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
b . setNetMapLocked ( & netmap . NetworkMap {
SelfNode : ( & tailcfg . Node {
MachineAuthorized : true ,
Addresses : ipps ( "100.101.101.101" ) ,
} ) . View ( ) ,
Peers : tt . peers ,
} )
got := b . Status ( )
for _ , peer := range got . Peer {
if ! reflect . DeepEqual ( peer . Capabilities , tt . expectedPeerCapabilities [ peer . ID ] ) {
t . Errorf ( "peer capabilities: expected %v got %v" , tt . expectedPeerCapabilities , peer . Capabilities )
}
if ! reflect . DeepEqual ( peer . CapMap , tt . expectedPeerCapMap [ peer . ID ] ) {
t . Errorf ( "peer capmap: expected %v got %v" , tt . expectedPeerCapMap , peer . CapMap )
}
}
} )
}
}
2022-11-28 18:47:05 -08:00
// legacyBackend was the interface between Tailscale frontends
// (e.g. cmd/tailscale, iOS/MacOS/Windows GUIs) and the tailscale
// backend (e.g. cmd/tailscaled) running on the same machine.
// (It has nothing to do with the interface between the backends
// and the cloud control plane.)
type legacyBackend interface {
// SetNotifyCallback sets the callback to be called on updates
// from the backend to the client.
SetNotifyCallback ( func ( ipn . Notify ) )
// Start starts or restarts the backend, typically when a
// frontend client connects.
Start ( ipn . Options ) error
}
// Verify that LocalBackend still implements the legacyBackend interface
// for now, at least until the macOS and iOS clients move off of it.
var _ legacyBackend = ( * LocalBackend ) ( nil )
2023-01-17 20:59:03 +00:00
func TestWatchNotificationsCallbacks ( t * testing . T ) {
b := new ( LocalBackend )
n := new ( ipn . Notify )
b . WatchNotifications ( context . Background ( ) , 0 , func ( ) {
b . mu . Lock ( )
defer b . mu . Unlock ( )
// Ensure a watcher has been installed.
if len ( b . notifyWatchers ) != 1 {
t . Fatalf ( "unexpected number of watchers in new LocalBackend, want: 1 got: %v" , len ( b . notifyWatchers ) )
}
// Send a notification. Range over notifyWatchers to get the channel
// because WatchNotifications doesn't expose the handle for it.
2023-09-18 10:30:58 -04:00
for _ , sess := range b . notifyWatchers {
2023-01-17 20:59:03 +00:00
select {
2023-09-18 10:30:58 -04:00
case sess . ch <- n :
2023-01-17 20:59:03 +00:00
default :
t . Fatalf ( "could not send notification" )
}
}
} , func ( roNotify * ipn . Notify ) bool {
if roNotify != n {
t . Fatalf ( "unexpected notification received. want: %v got: %v" , n , roNotify )
}
return false
} )
// Ensure watchers have been cleaned up.
b . mu . Lock ( )
defer b . mu . Unlock ( )
if len ( b . notifyWatchers ) != 0 {
t . Fatalf ( "unexpected number of watchers in new LocalBackend, want: 0 got: %v" , len ( b . notifyWatchers ) )
}
}
2023-09-01 19:28:00 -07:00
// tests LocalBackend.updateNetmapDeltaLocked
func TestUpdateNetmapDelta ( t * testing . T ) {
2024-02-02 12:45:32 -06:00
b := newTestLocalBackend ( t )
2025-04-24 21:54:48 -05:00
if b . currentNode ( ) . UpdateNetmapDelta ( nil ) {
2023-09-01 19:28:00 -07:00
t . Errorf ( "updateNetmapDeltaLocked() = true, want false with nil netmap" )
}
2025-04-24 21:54:48 -05:00
nm := & netmap . NetworkMap { }
2024-04-16 13:15:13 -07:00
for i := range 5 {
2025-06-18 10:31:00 -07:00
id := tailcfg . NodeID ( i + 1 )
nm . Peers = append ( nm . Peers , ( & tailcfg . Node {
ID : id ,
Key : makeNodeKeyFromID ( id ) ,
} ) . View ( ) )
2023-09-01 19:28:00 -07:00
}
2025-04-24 21:54:48 -05:00
b . currentNode ( ) . SetNetMap ( nm )
2023-09-01 19:28:00 -07:00
someTime := time . Unix ( 123 , 0 )
muts , ok := netmap . MutationsFromMapResponse ( & tailcfg . MapResponse {
PeersChangedPatch : [ ] * tailcfg . PeerChange {
{
NodeID : 1 ,
DERPRegion : 1 ,
} ,
{
NodeID : 2 ,
Online : ptr . To ( true ) ,
} ,
{
NodeID : 3 ,
Online : ptr . To ( false ) ,
} ,
{
NodeID : 4 ,
LastSeen : ptr . To ( someTime ) ,
} ,
} ,
} , someTime )
if ! ok {
t . Fatal ( "netmap.MutationsFromMapResponse failed" )
}
2025-04-24 21:54:48 -05:00
if ! b . currentNode ( ) . UpdateNetmapDelta ( muts ) {
2023-09-01 19:28:00 -07:00
t . Fatalf ( "updateNetmapDeltaLocked() = false, want true with new netmap" )
}
wants := [ ] * tailcfg . Node {
{
2025-01-14 10:19:52 -08:00
ID : 1 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 1 ) ,
2025-01-14 10:19:52 -08:00
HomeDERP : 1 ,
2023-09-01 19:28:00 -07:00
} ,
{
ID : 2 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 2 ) ,
2023-09-01 19:28:00 -07:00
Online : ptr . To ( true ) ,
} ,
{
ID : 3 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 3 ) ,
2023-09-01 19:28:00 -07:00
Online : ptr . To ( false ) ,
} ,
{
ID : 4 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 4 ) ,
2023-09-01 19:28:00 -07:00
LastSeen : ptr . To ( someTime ) ,
} ,
}
for _ , want := range wants {
2025-06-27 00:43:48 +08:00
gotv , ok := b . currentNode ( ) . NodeByID ( want . ID )
2023-09-17 02:13:52 -05:00
if ! ok {
2025-04-24 21:54:48 -05:00
t . Errorf ( "netmap.Peer %v missing from b.profile.Peers" , want . ID )
2023-09-01 19:28:00 -07:00
continue
}
2023-09-17 02:13:52 -05:00
got := gotv . AsStruct ( )
2023-09-01 19:28:00 -07:00
if ! reflect . DeepEqual ( got , want ) {
t . Errorf ( "netmap.Peer %v wrong.\n got: %v\nwant: %v" , want . ID , logger . AsJSON ( got ) , logger . AsJSON ( want ) )
}
}
}
2023-09-20 15:34:12 +01:00
// tests WhoIs and indirectly that setNetMapLocked updates b.nodeByAddr correctly.
func TestWhoIs ( t * testing . T ) {
b := newTestLocalBackend ( t )
b . setNetMapLocked ( & netmap . NetworkMap {
SelfNode : ( & tailcfg . Node {
ID : 1 ,
User : 10 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 1 ) ,
2023-09-20 15:34:12 +01:00
Addresses : [ ] netip . Prefix { netip . MustParsePrefix ( "100.101.102.103/32" ) } ,
} ) . View ( ) ,
Peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
ID : 2 ,
User : 20 ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 2 ) ,
2023-09-20 15:34:12 +01:00
Addresses : [ ] netip . Prefix { netip . MustParsePrefix ( "100.200.200.200/32" ) } ,
} ) . View ( ) ,
} ,
2025-01-24 19:41:30 -08:00
UserProfiles : map [ tailcfg . UserID ] tailcfg . UserProfileView {
10 : ( & tailcfg . UserProfile {
2023-09-20 15:34:12 +01:00
DisplayName : "Myself" ,
2025-01-24 19:41:30 -08:00
} ) . View ( ) ,
20 : ( & tailcfg . UserProfile {
2023-09-20 15:34:12 +01:00
DisplayName : "Peer" ,
2025-01-24 19:41:30 -08:00
} ) . View ( ) ,
2023-09-20 15:34:12 +01:00
} ,
} )
tests := [ ] struct {
q string
want tailcfg . NodeID // 0 means want ok=false
wantName string
} {
{ "100.101.102.103:0" , 1 , "Myself" } ,
{ "100.101.102.103:123" , 1 , "Myself" } ,
{ "100.200.200.200:0" , 2 , "Peer" } ,
{ "100.200.200.200:123" , 2 , "Peer" } ,
{ "100.4.0.4:404" , 0 , "" } ,
}
for _ , tt := range tests {
t . Run ( tt . q , func ( t * testing . T ) {
2024-06-06 14:48:40 -04:00
nv , up , ok := b . WhoIs ( "" , netip . MustParseAddrPort ( tt . q ) )
2023-09-20 15:34:12 +01:00
var got tailcfg . NodeID
if ok {
got = nv . ID ( )
}
if got != tt . want {
t . Errorf ( "got nodeID %v; want %v" , got , tt . want )
}
if up . DisplayName != tt . wantName {
t . Errorf ( "got name %q; want %q" , up . DisplayName , tt . wantName )
}
} )
}
}
2023-09-20 13:07:48 -07:00
func TestWireguardExitNodeDNSResolvers ( t * testing . T ) {
type tc struct {
name string
id tailcfg . StableNodeID
peers [ ] * tailcfg . Node
wantOK bool
wantResolvers [ ] * dnstype . Resolver
}
tests := [ ] tc {
{
name : "no peers" ,
id : "1" ,
wantOK : false ,
wantResolvers : nil ,
} ,
{
name : "non wireguard peer" ,
id : "1" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
StableID : "1" ,
IsWireGuardOnly : false ,
ExitNodeDNSResolvers : [ ] * dnstype . Resolver { { Addr : "dns.example.com" } } ,
} ,
} ,
wantOK : false ,
wantResolvers : nil ,
} ,
{
name : "no matching IDs" ,
id : "2" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
StableID : "1" ,
IsWireGuardOnly : true ,
ExitNodeDNSResolvers : [ ] * dnstype . Resolver { { Addr : "dns.example.com" } } ,
} ,
} ,
wantOK : false ,
wantResolvers : nil ,
} ,
{
name : "wireguard peer" ,
id : "1" ,
peers : [ ] * tailcfg . Node {
{
ID : 1 ,
StableID : "1" ,
IsWireGuardOnly : true ,
ExitNodeDNSResolvers : [ ] * dnstype . Resolver { { Addr : "dns.example.com" } } ,
} ,
} ,
wantOK : true ,
wantResolvers : [ ] * dnstype . Resolver { { Addr : "dns.example.com" } } ,
} ,
}
for _ , tc := range tests {
peers := peersMap ( nodeViews ( tc . peers ) )
nm := & netmap . NetworkMap { }
gotResolvers , gotOK := wireguardExitNodeDNSResolvers ( nm , peers , tc . id )
if gotOK != tc . wantOK || ! resolversEqual ( t , gotResolvers , tc . wantResolvers ) {
t . Errorf ( "case: %s: got %v, %v, want %v, %v" , tc . name , gotOK , gotResolvers , tc . wantOK , tc . wantResolvers )
}
}
}
func TestDNSConfigForNetmapForExitNodeConfigs ( t * testing . T ) {
type tc struct {
name string
exitNode tailcfg . StableNodeID
peers [ ] tailcfg . NodeView
dnsConfig * tailcfg . DNSConfig
wantDefaultResolvers [ ] * dnstype . Resolver
wantRoutes map [ dnsname . FQDN ] [ ] * dnstype . Resolver
}
defaultResolvers := [ ] * dnstype . Resolver { { Addr : "default.example.com" } }
wgResolvers := [ ] * dnstype . Resolver { { Addr : "wg.example.com" } }
peers := [ ] tailcfg . NodeView {
( & tailcfg . Node {
ID : 1 ,
StableID : "wg" ,
IsWireGuardOnly : true ,
ExitNodeDNSResolvers : wgResolvers ,
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
} ) . View ( ) ,
// regular tailscale exit node with DNS capabilities
( & tailcfg . Node {
Cap : 26 ,
ID : 2 ,
StableID : "ts" ,
Hostinfo : ( & tailcfg . Hostinfo { } ) . View ( ) ,
} ) . View ( ) ,
}
exitDOH := peerAPIBase ( & netmap . NetworkMap { Peers : peers } , peers [ 0 ] ) + "/dns-query"
routes := map [ dnsname . FQDN ] [ ] * dnstype . Resolver {
"route.example.com." : { { Addr : "route.example.com" } } ,
}
stringifyRoutes := func ( routes map [ dnsname . FQDN ] [ ] * dnstype . Resolver ) map [ string ] [ ] * dnstype . Resolver {
if routes == nil {
return nil
}
m := make ( map [ string ] [ ] * dnstype . Resolver )
for k , v := range routes {
m [ string ( k ) ] = v
}
return m
}
tests := [ ] tc {
{
name : "noExit/noRoutes/noResolver" ,
exitNode : "" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { } ,
wantDefaultResolvers : nil ,
wantRoutes : nil ,
} ,
{
name : "tsExit/noRoutes/noResolver" ,
exitNode : "ts" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { } ,
wantDefaultResolvers : [ ] * dnstype . Resolver { { Addr : exitDOH } } ,
wantRoutes : nil ,
} ,
{
name : "tsExit/noRoutes/defaultResolver" ,
exitNode : "ts" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Resolvers : defaultResolvers } ,
wantDefaultResolvers : [ ] * dnstype . Resolver { { Addr : exitDOH } } ,
wantRoutes : nil ,
} ,
// The following two cases may need to be revisited. For a shared-in
// exit node split-DNS may effectively break, furthermore in the future
// if different nodes observe different DNS configurations, even a
// tailnet local exit node may present a different DNS configuration,
// which may not meet expectations in some use cases.
// In the case where a default resolver is set, the default resolver
// should also perhaps take precedence also.
{
name : "tsExit/routes/noResolver" ,
exitNode : "ts" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Routes : stringifyRoutes ( routes ) } ,
wantDefaultResolvers : [ ] * dnstype . Resolver { { Addr : exitDOH } } ,
wantRoutes : nil ,
} ,
{
name : "tsExit/routes/defaultResolver" ,
exitNode : "ts" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Routes : stringifyRoutes ( routes ) , Resolvers : defaultResolvers } ,
wantDefaultResolvers : [ ] * dnstype . Resolver { { Addr : exitDOH } } ,
wantRoutes : nil ,
} ,
// WireGuard exit nodes with DNS capabilities provide a "fallback" type
// behavior, they have a lower precedence than a default resolver, but
// otherwise allow split-DNS to operate as normal, and are used when
// there is no default resolver.
{
name : "wgExit/noRoutes/noResolver" ,
exitNode : "wg" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { } ,
wantDefaultResolvers : wgResolvers ,
wantRoutes : nil ,
} ,
{
name : "wgExit/noRoutes/defaultResolver" ,
exitNode : "wg" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Resolvers : defaultResolvers } ,
wantDefaultResolvers : defaultResolvers ,
wantRoutes : nil ,
} ,
{
name : "wgExit/routes/defaultResolver" ,
exitNode : "wg" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Routes : stringifyRoutes ( routes ) , Resolvers : defaultResolvers } ,
wantDefaultResolvers : defaultResolvers ,
wantRoutes : routes ,
} ,
{
name : "wgExit/routes/noResolver" ,
exitNode : "wg" ,
peers : peers ,
dnsConfig : & tailcfg . DNSConfig { Routes : stringifyRoutes ( routes ) } ,
wantDefaultResolvers : wgResolvers ,
wantRoutes : routes ,
} ,
}
for _ , tc := range tests {
t . Run ( tc . name , func ( t * testing . T ) {
nm := & netmap . NetworkMap {
Peers : tc . peers ,
DNS : * tc . dnsConfig ,
}
prefs := & ipn . Prefs { ExitNodeID : tc . exitNode , CorpDNS : true }
2024-09-09 15:15:29 -04:00
got := dnsConfigForNetmap ( nm , peersMap ( tc . peers ) , prefs . View ( ) , false , t . Logf , "" )
2023-09-20 13:07:48 -07:00
if ! resolversEqual ( t , got . DefaultResolvers , tc . wantDefaultResolvers ) {
t . Errorf ( "DefaultResolvers: got %#v, want %#v" , got . DefaultResolvers , tc . wantDefaultResolvers )
}
if ! routesEqual ( t , got . Routes , tc . wantRoutes ) {
t . Errorf ( "Routes: got %#v, want %#v" , got . Routes , tc . wantRoutes )
}
} )
}
}
2023-10-27 14:20:10 -07:00
func TestOfferingAppConnector ( t * testing . T ) {
2024-04-11 10:12:13 -07:00
for _ , shouldStore := range [ ] bool { false , true } {
b := newTestBackend ( t )
if b . OfferingAppConnector ( ) {
t . Fatal ( "unexpected offering app connector" )
}
if shouldStore {
b . appConnector = appc . NewAppConnector ( t . Logf , nil , & appc . RouteInfo { } , fakeStoreRoutes )
} else {
b . appConnector = appc . NewAppConnector ( t . Logf , nil , nil , nil )
}
if ! b . OfferingAppConnector ( ) {
t . Fatal ( "unexpected not offering app connector" )
}
2023-10-27 14:20:10 -07:00
}
}
func TestRouteAdvertiser ( t * testing . T ) {
b := newTestBackend ( t )
testPrefix := netip . MustParsePrefix ( "192.0.0.8/32" )
ra := appc . RouteAdvertiser ( b )
must . Do ( ra . AdvertiseRoute ( testPrefix ) )
routes := b . Prefs ( ) . AdvertiseRoutes ( )
if routes . Len ( ) != 1 || routes . At ( 0 ) != testPrefix {
t . Fatalf ( "got routes %v, want %v" , routes , [ ] netip . Prefix { testPrefix } )
}
2024-01-17 11:35:55 -08:00
must . Do ( ra . UnadvertiseRoute ( testPrefix ) )
routes = b . Prefs ( ) . AdvertiseRoutes ( )
if routes . Len ( ) != 0 {
t . Fatalf ( "got routes %v, want none" , routes )
}
2023-10-27 14:20:10 -07:00
}
2023-12-19 09:33:38 -08:00
// TestRouterAdvertiserIgnoresContainedRoutes verifies that advertising a
// prefix already covered by an existing advertised route is a no-op: the
// covered /32 must not appear as a separate entry in AdvertiseRoutes.
func TestRouterAdvertiserIgnoresContainedRoutes(t *testing.T) {
	b := newTestBackend(t)
	testPrefix := netip.MustParsePrefix("192.0.0.0/24")
	ra := appc.RouteAdvertiser(b)
	must.Do(ra.AdvertiseRoute(testPrefix))

	// assertOnlyTestPrefix checks that the /24 is the sole advertised route.
	assertOnlyTestPrefix := func() {
		got := b.Prefs().AdvertiseRoutes()
		if got.Len() != 1 || got.At(0) != testPrefix {
			t.Fatalf("got routes %v, want %v", got, []netip.Prefix{testPrefix})
		}
	}
	assertOnlyTestPrefix()

	// the above /32 is not added as it is contained within the /24
	must.Do(ra.AdvertiseRoute(netip.MustParsePrefix("192.0.0.8/32")))
	assertOnlyTestPrefix()
}
2023-10-27 14:20:10 -07:00
func TestObserveDNSResponse ( t * testing . T ) {
2024-04-11 10:12:13 -07:00
for _ , shouldStore := range [ ] bool { false , true } {
b := newTestBackend ( t )
2023-10-27 14:20:10 -07:00
2024-04-11 10:12:13 -07:00
// ensure no error when no app connector is configured
2025-01-23 09:03:56 -08:00
if err := b . ObserveDNSResponse ( dnsResponse ( "example.com." , "192.0.0.8" ) ) ; err != nil {
t . Errorf ( "ObserveDNSResponse: %v" , err )
}
2023-10-27 14:20:10 -07:00
2024-04-11 10:12:13 -07:00
rc := & appctest . RouteCollector { }
if shouldStore {
b . appConnector = appc . NewAppConnector ( t . Logf , rc , & appc . RouteInfo { } , fakeStoreRoutes )
} else {
b . appConnector = appc . NewAppConnector ( t . Logf , rc , nil , nil )
}
b . appConnector . UpdateDomains ( [ ] string { "example.com" } )
b . appConnector . Wait ( context . Background ( ) )
2025-01-23 09:03:56 -08:00
if err := b . ObserveDNSResponse ( dnsResponse ( "example.com." , "192.0.0.8" ) ) ; err != nil {
t . Errorf ( "ObserveDNSResponse: %v" , err )
}
2024-04-11 10:12:13 -07:00
b . appConnector . Wait ( context . Background ( ) )
wantRoutes := [ ] netip . Prefix { netip . MustParsePrefix ( "192.0.0.8/32" ) }
if ! slices . Equal ( rc . Routes ( ) , wantRoutes ) {
t . Fatalf ( "got routes %v, want %v" , rc . Routes ( ) , wantRoutes )
}
2024-01-22 16:57:31 -08:00
}
}
2024-01-31 17:36:39 -08:00
func TestCoveredRouteRangeNoDefault ( t * testing . T ) {
2024-01-22 16:57:31 -08:00
tests := [ ] struct {
existingRoute netip . Prefix
newRoute netip . Prefix
want bool
} {
{
existingRoute : netip . MustParsePrefix ( "192.0.0.1/32" ) ,
newRoute : netip . MustParsePrefix ( "192.0.0.1/32" ) ,
want : true ,
} ,
{
existingRoute : netip . MustParsePrefix ( "192.0.0.1/32" ) ,
newRoute : netip . MustParsePrefix ( "192.0.0.2/32" ) ,
want : false ,
} ,
{
existingRoute : netip . MustParsePrefix ( "192.0.0.0/24" ) ,
newRoute : netip . MustParsePrefix ( "192.0.0.1/32" ) ,
want : true ,
} ,
{
existingRoute : netip . MustParsePrefix ( "192.0.0.0/16" ) ,
newRoute : netip . MustParsePrefix ( "192.0.0.0/24" ) ,
want : true ,
} ,
2024-01-31 17:36:39 -08:00
{
existingRoute : netip . MustParsePrefix ( "0.0.0.0/0" ) ,
newRoute : netip . MustParsePrefix ( "192.0.0.0/24" ) ,
want : false ,
} ,
{
existingRoute : netip . MustParsePrefix ( "::/0" ) ,
newRoute : netip . MustParsePrefix ( "2001:db8::/32" ) ,
want : false ,
} ,
2024-01-22 16:57:31 -08:00
}
for _ , tt := range tests {
2024-01-31 17:36:39 -08:00
got := coveredRouteRangeNoDefault ( [ ] netip . Prefix { tt . existingRoute } , tt . newRoute )
2024-01-22 16:57:31 -08:00
if got != tt . want {
t . Errorf ( "coveredRouteRange(%v, %v) = %v, want %v" , tt . existingRoute , tt . newRoute , got , tt . want )
}
2023-10-27 14:20:10 -07:00
}
}
2023-10-31 14:59:18 -07:00
func TestReconfigureAppConnector ( t * testing . T ) {
b := newTestBackend ( t )
2025-04-24 21:54:48 -05:00
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2023-10-31 14:59:18 -07:00
if b . appConnector != nil {
t . Fatal ( "unexpected app connector" )
}
b . EditPrefs ( & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AppConnector : ipn . AppConnectorPrefs {
Advertise : true ,
} ,
} ,
AppConnectorSet : true ,
} )
2025-04-24 21:54:48 -05:00
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2023-10-31 14:59:18 -07:00
if b . appConnector == nil {
t . Fatal ( "expected app connector" )
}
appCfg := ` {
"name" : "example" ,
"domains" : [ "example.com" ] ,
"connectors" : [ "tag:example" ]
} `
2025-04-24 21:54:48 -05:00
nm := & netmap . NetworkMap {
SelfNode : ( & tailcfg . Node {
Name : "example.ts.net" ,
Tags : [ ] string { "tag:example" } ,
CapMap : ( tailcfg . NodeCapMap ) ( map [ tailcfg . NodeCapability ] [ ] tailcfg . RawMessage {
"tailscale.com/app-connectors" : { tailcfg . RawMessage ( appCfg ) } ,
} ) ,
} ) . View ( ) ,
}
2023-10-31 14:59:18 -07:00
2025-04-24 21:54:48 -05:00
b . currentNode ( ) . SetNetMap ( nm )
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2024-01-18 10:18:25 -08:00
b . appConnector . Wait ( context . Background ( ) )
2023-10-31 14:59:18 -07:00
want := [ ] string { "example.com" }
if ! slices . Equal ( b . appConnector . Domains ( ) . AsSlice ( ) , want ) {
t . Fatalf ( "got domains %v, want %v" , b . appConnector . Domains ( ) , want )
}
2023-11-09 18:00:56 -08:00
if v , _ := b . hostinfo . AppConnector . Get ( ) ; ! v {
t . Fatalf ( "expected app connector service" )
}
2023-11-09 17:21:10 -08:00
// disable the connector in order to assert that the service is removed
b . EditPrefs ( & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AppConnector : ipn . AppConnectorPrefs {
Advertise : false ,
} ,
} ,
AppConnectorSet : true ,
} )
2025-04-24 21:54:48 -05:00
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2023-11-09 17:21:10 -08:00
if b . appConnector != nil {
t . Fatal ( "expected no app connector" )
}
2023-11-09 18:00:56 -08:00
if v , _ := b . hostinfo . AppConnector . Get ( ) ; v {
t . Fatalf ( "expected no app connector service" )
}
2023-11-09 17:21:10 -08:00
}
2025-01-22 16:50:25 -08:00
func TestBackfillAppConnectorRoutes ( t * testing . T ) {
// Create backend with an empty app connector.
b := newTestBackend ( t )
2025-03-17 14:58:25 -05:00
// newTestBackend creates a backend with a non-nil netmap,
// but this test requires a nil netmap.
// Otherwise, instead of backfilling, [LocalBackend.reconfigAppConnectorLocked]
// uses the domains and routes from netmap's [appctype.AppConnectorAttr].
// Additionally, a non-nil netmap makes reconfigAppConnectorLocked
// asynchronous, resulting in a flaky test.
// Therefore, we set the netmap to nil to simulate a fresh backend start
// or a profile switch where the netmap is not yet available.
b . setNetMapLocked ( nil )
2025-01-22 16:50:25 -08:00
if err := b . Start ( ipn . Options { } ) ; err != nil {
t . Fatal ( err )
}
if _ , err := b . EditPrefs ( & ipn . MaskedPrefs {
Prefs : ipn . Prefs {
AppConnector : ipn . AppConnectorPrefs { Advertise : true } ,
} ,
AppConnectorSet : true ,
} ) ; err != nil {
t . Fatal ( err )
}
2025-04-24 21:54:48 -05:00
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2025-01-22 16:50:25 -08:00
// Smoke check that AdvertiseRoutes doesn't have the test IP.
ip := netip . MustParseAddr ( "1.2.3.4" )
routes := b . Prefs ( ) . AdvertiseRoutes ( ) . AsSlice ( )
if slices . Contains ( routes , netip . PrefixFrom ( ip , ip . BitLen ( ) ) ) {
t . Fatalf ( "AdvertiseRoutes %v on a fresh backend already contains advertised route for %v" , routes , ip )
}
// Store the test IP in profile data, but not in Prefs.AdvertiseRoutes.
b . ControlKnobs ( ) . AppCStoreRoutes . Store ( true )
if err := b . storeRouteInfo ( & appc . RouteInfo {
Domains : map [ string ] [ ] netip . Addr {
"example.com" : { ip } ,
} ,
} ) ; err != nil {
t . Fatal ( err )
}
// Mimic b.authReconfigure for the app connector bits.
b . mu . Lock ( )
2025-04-24 21:54:48 -05:00
b . reconfigAppConnectorLocked ( b . NetMap ( ) , b . pm . prefs )
2025-01-22 16:50:25 -08:00
b . mu . Unlock ( )
b . readvertiseAppConnectorRoutes ( )
// Check that Prefs.AdvertiseRoutes got backfilled with routes stored in
// profile data.
routes = b . Prefs ( ) . AdvertiseRoutes ( ) . AsSlice ( )
if ! slices . Contains ( routes , netip . PrefixFrom ( ip , ip . BitLen ( ) ) ) {
t . Fatalf ( "AdvertiseRoutes %v was not backfilled from stored app connector routes with %v" , routes , ip )
}
}
2023-09-20 13:07:48 -07:00
// resolversEqual reports whether a and b contain equal resolvers in the same
// order. Treats two nil slices as equal; any other mismatch is reported via
// t.Errorf and returns false.
func resolversEqual(t *testing.T, a, b []*dnstype.Resolver) bool {
	switch {
	case a == nil && b == nil:
		return true
	case a == nil || b == nil:
		t.Errorf("resolversEqual: a == nil || b == nil : %#v != %#v", a, b)
		return false
	case len(a) != len(b):
		t.Errorf("resolversEqual: len(a) != len(b) : %#v != %#v", a, b)
		return false
	}
	for i, ra := range a {
		if !ra.Equal(b[i]) {
			t.Errorf("resolversEqual: a != b [%d]: %v != %v", i, *ra, *b[i])
			return false
		}
	}
	return true
}
// routesEqual reports whether a and b describe the same split-DNS routes,
// logging (via t.Logf, without failing) the first difference found.
func routesEqual(t *testing.T, a, b map[dnsname.FQDN][]*dnstype.Resolver) bool {
	if len(a) != len(b) {
		t.Logf("routes: len(a) != len(b): %d != %d", len(a), len(b))
		return false
	}
	for name, resolvers := range a {
		if !resolversEqual(t, resolvers, b[name]) {
			t.Logf("routes: a != b [%s]: %v != %v", name, resolvers, b[name])
			return false
		}
	}
	return true
}
2023-10-27 14:20:10 -07:00
// dnsResponse is a test helper that creates a DNS response buffer for the given domain and address
func dnsResponse(domain, address string) []byte {
	addr := netip.MustParseAddr(address)
	b := dnsmessage.NewBuilder(nil, dnsmessage.Header{})
	b.EnableCompression()
	b.StartAnswers()

	// Shared answer header; only the record type differs by address family.
	rh := dnsmessage.ResourceHeader{
		Name:  dnsmessage.MustNewName(domain),
		Class: dnsmessage.ClassINET,
		TTL:   0,
	}
	switch addr.BitLen() {
	case 32:
		rh.Type = dnsmessage.TypeA
		b.AResource(rh, dnsmessage.AResource{A: addr.As4()})
	case 128:
		rh.Type = dnsmessage.TypeAAAA
		b.AAAAResource(rh, dnsmessage.AAAAResource{AAAA: addr.As16()})
	default:
		panic("invalid address length")
	}
	return must.Get(b.Finish())
}
2023-11-29 16:48:25 -05:00
func TestSetExitNodeIDPolicy ( t * testing . T ) {
2025-06-18 10:31:00 -07:00
zeroValHostinfoView := new ( tailcfg . Hostinfo ) . View ( )
2023-11-29 16:48:25 -05:00
pfx := netip . MustParsePrefix
tests := [ ] struct {
2024-06-28 23:17:31 -04:00
name string
exitNodeIPKey bool
exitNodeIDKey bool
exitNodeID string
exitNodeIP string
prefs * ipn . Prefs
exitNodeIPWant string
exitNodeIDWant string
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
autoExitNodeWant ipn . ExitNodeExpression
2024-06-28 23:17:31 -04:00
prefsChanged bool
nm * netmap . NetworkMap
lastSuggestedExitNode tailcfg . StableNodeID
2023-11-29 16:48:25 -05:00
} {
{
name : "ExitNodeID key is set" ,
exitNodeIDKey : true ,
exitNodeID : "123" ,
exitNodeIDWant : "123" ,
prefsChanged : true ,
} ,
{
name : "ExitNodeID key not set" ,
exitNodeIDKey : true ,
exitNodeIDWant : "" ,
prefsChanged : false ,
} ,
{
name : "ExitNodeID key set, ExitNodeIP preference set" ,
exitNodeIDKey : true ,
exitNodeID : "123" ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIDWant : "123" ,
prefsChanged : true ,
} ,
{
name : "ExitNodeID key not set, ExitNodeIP key set" ,
exitNodeIPKey : true ,
exitNodeIP : "127.0.0.1" ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIPWant : "127.0.0.1" ,
prefsChanged : false ,
} ,
{
name : "ExitNodeIP key set, existing ExitNodeIP pref" ,
exitNodeIPKey : true ,
exitNodeIP : "127.0.0.1" ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIPWant : "127.0.0.1" ,
prefsChanged : false ,
} ,
{
name : "existing preferences match policy" ,
exitNodeIDKey : true ,
exitNodeID : "123" ,
prefs : & ipn . Prefs { ExitNodeID : tailcfg . StableNodeID ( "123" ) } ,
exitNodeIDWant : "123" ,
prefsChanged : false ,
} ,
{
name : "ExitNodeIP set if net map does not have corresponding node" ,
exitNodeIPKey : true ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIP : "127.0.0.1" ,
exitNodeIPWant : "127.0.0.1" ,
prefsChanged : false ,
nm : & netmap . NetworkMap {
Name : "foo.tailnet" ,
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
pfx ( "100.102.103.104/32" ) ,
pfx ( "100::123/128" ) ,
} ,
} ) . View ( ) ,
Peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 201 ,
2023-11-29 16:48:25 -05:00
Name : "a.tailnet" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 201 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100.0.0.201/32" ) ,
pfx ( "100::201/128" ) ,
} ,
} ) . View ( ) ,
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 202 ,
2023-11-29 16:48:25 -05:00
Name : "b.tailnet" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 202 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100::202/128" ) ,
} ,
} ) . View ( ) ,
} ,
} ,
} ,
{
name : "ExitNodeIP cleared if net map has corresponding node - policy matches prefs" ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIPKey : true ,
exitNodeIP : "127.0.0.1" ,
exitNodeIPWant : "" ,
exitNodeIDWant : "123" ,
prefsChanged : true ,
nm : & netmap . NetworkMap {
Name : "foo.tailnet" ,
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
pfx ( "100.102.103.104/32" ) ,
pfx ( "100::123/128" ) ,
} ,
} ) . View ( ) ,
Peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 123 ,
2023-11-29 16:48:25 -05:00
Name : "a.tailnet" ,
StableID : tailcfg . StableNodeID ( "123" ) ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 123 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "127.0.0.1/32" ) ,
pfx ( "100::201/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 202 ,
2023-11-29 16:48:25 -05:00
Name : "b.tailnet" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 202 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100::202/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
} ,
} ,
} ,
{
name : "ExitNodeIP cleared if net map has corresponding node - no policy set" ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIPWant : "" ,
exitNodeIDWant : "123" ,
prefsChanged : true ,
nm : & netmap . NetworkMap {
Name : "foo.tailnet" ,
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
pfx ( "100.102.103.104/32" ) ,
pfx ( "100::123/128" ) ,
} ,
} ) . View ( ) ,
Peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 123 ,
2023-11-29 16:48:25 -05:00
Name : "a.tailnet" ,
StableID : tailcfg . StableNodeID ( "123" ) ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 123 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "127.0.0.1/32" ) ,
pfx ( "100::201/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 202 ,
2023-11-29 16:48:25 -05:00
Name : "b.tailnet" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 202 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100::202/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
} ,
} ,
} ,
{
name : "ExitNodeIP cleared if net map has corresponding node - different exit node IP in policy" ,
exitNodeIPKey : true ,
prefs : & ipn . Prefs { ExitNodeIP : netip . MustParseAddr ( "127.0.0.1" ) } ,
exitNodeIP : "100.64.5.6" ,
exitNodeIPWant : "" ,
exitNodeIDWant : "123" ,
prefsChanged : true ,
nm : & netmap . NetworkMap {
Name : "foo.tailnet" ,
SelfNode : ( & tailcfg . Node {
Addresses : [ ] netip . Prefix {
pfx ( "100.102.103.104/32" ) ,
pfx ( "100::123/128" ) ,
} ,
} ) . View ( ) ,
Peers : [ ] tailcfg . NodeView {
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 123 ,
2023-11-29 16:48:25 -05:00
Name : "a.tailnet" ,
StableID : tailcfg . StableNodeID ( "123" ) ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 123 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100.64.5.6/32" ) ,
pfx ( "100::201/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
( & tailcfg . Node {
2025-06-18 10:31:00 -07:00
ID : 202 ,
2023-11-29 16:48:25 -05:00
Name : "b.tailnet" ,
2025-06-18 10:31:00 -07:00
Key : makeNodeKeyFromID ( 202 ) ,
2023-11-29 16:48:25 -05:00
Addresses : [ ] netip . Prefix {
pfx ( "100::202/128" ) ,
} ,
2025-06-18 10:31:00 -07:00
Hostinfo : zeroValHostinfoView ,
2023-11-29 16:48:25 -05:00
} ) . View ( ) ,
} ,
} ,
} ,
2024-06-28 23:17:31 -04:00
{
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
name : "ExitNodeID key is set to auto:any and last suggested exit node is populated" ,
2024-06-28 23:17:31 -04:00
exitNodeIDKey : true ,
exitNodeID : "auto:any" ,
lastSuggestedExitNode : "123" ,
exitNodeIDWant : "123" ,
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
autoExitNodeWant : "any" ,
2024-06-28 23:17:31 -04:00
prefsChanged : true ,
} ,
{
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
name : "ExitNodeID key is set to auto:any and last suggested exit node is not populated" ,
exitNodeIDKey : true ,
exitNodeID : "auto:any" ,
exitNodeIDWant : "auto:any" ,
autoExitNodeWant : "any" ,
prefsChanged : true ,
} ,
{
name : "ExitNodeID key is set to auto:foo and last suggested exit node is populated" ,
exitNodeIDKey : true ,
exitNodeID : "auto:foo" ,
lastSuggestedExitNode : "123" ,
exitNodeIDWant : "123" ,
autoExitNodeWant : "foo" ,
prefsChanged : true ,
} ,
{
name : "ExitNodeID key is set to auto:foo and last suggested exit node is not populated" ,
exitNodeIDKey : true ,
exitNodeID : "auto:foo" ,
exitNodeIDWant : "auto:any" , // should be "auto:any" for compatibility with existing clients
autoExitNodeWant : "foo" ,
prefsChanged : true ,
2024-06-28 23:17:31 -04:00
} ,
2023-11-29 16:48:25 -05:00
}
2024-10-08 10:50:14 -05:00
syspolicy . RegisterWellKnownSettingsForTest ( t )
2023-11-29 16:48:25 -05:00
for _ , test := range tests {
t . Run ( test . name , func ( t * testing . T ) {
b := newTestBackend ( t )
2024-10-08 10:50:14 -05:00
2024-11-21 19:29:20 -06:00
policyStore := source . NewTestStore ( t )
if test . exitNodeIDKey {
policyStore . SetStrings ( source . TestSettingOf ( syspolicy . ExitNodeID , test . exitNodeID ) )
}
if test . exitNodeIPKey {
policyStore . SetStrings ( source . TestSettingOf ( syspolicy . ExitNodeIP , test . exitNodeIP ) )
}
2024-10-08 10:50:14 -05:00
syspolicy . MustRegisterStoreForTest ( t , "TestStore" , setting . DeviceScope , policyStore )
2023-11-29 16:48:25 -05:00
if test . nm == nil {
test . nm = new ( netmap . NetworkMap )
}
if test . prefs == nil {
test . prefs = ipn . NewPrefs ( )
}
2024-05-03 10:59:22 -04:00
pm := must . Get ( newProfileManager ( new ( mem . Store ) , t . Logf , new ( health . Tracker ) ) )
2023-11-29 16:48:25 -05:00
pm . prefs = test . prefs . View ( )
2025-04-24 21:54:48 -05:00
b . currentNode ( ) . SetNetMap ( test . nm )
2023-11-29 16:48:25 -05:00
b . pm = pm
2024-06-28 23:17:31 -04:00
b . lastSuggestedExitNode = test . lastSuggestedExitNode
2024-11-21 19:29:20 -06:00
prefs := b . pm . prefs . AsStruct ( )
2025-07-03 19:37:56 -05:00
if changed := b . reconcilePrefsLocked ( prefs ) ; changed != test . prefsChanged {
2024-11-21 19:29:20 -06:00
t . Errorf ( "wanted prefs changed %v, got prefs changed %v" , test . prefsChanged , changed )
}
// Both [LocalBackend.SetPrefsForTest] and [LocalBackend.EditPrefs]
// apply syspolicy settings to the current profile's preferences. Therefore,
// we pass the current, unmodified preferences and expect the effective
// preferences to change.
2024-04-14 19:47:32 -07:00
b . SetPrefsForTest ( pm . CurrentPrefs ( ) . AsStruct ( ) )
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
if got := b . Prefs ( ) . ExitNodeID ( ) ; got != tailcfg . StableNodeID ( test . exitNodeIDWant ) {
t . Errorf ( "ExitNodeID: got %q; want %q" , got , test . exitNodeIDWant )
2023-11-29 16:48:25 -05:00
}
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
if got := b . Prefs ( ) . ExitNodeIP ( ) ; test . exitNodeIPWant == "" {
2023-11-01 17:20:25 -04:00
if got . String ( ) != "invalid IP" {
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
t . Errorf ( "ExitNodeIP: got %v want invalid IP" , got )
2023-11-29 16:48:25 -05:00
}
2023-11-01 17:20:25 -04:00
} else if got . String ( ) != test . exitNodeIPWant {
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
t . Errorf ( "ExitNodeIP: got %q; want %q" , got , test . exitNodeIPWant )
}
if got := b . Prefs ( ) . AutoExitNode ( ) ; got != test . autoExitNodeWant {
t . Errorf ( "AutoExitNode: got %q; want %q" , got , test . autoExitNodeWant )
2023-11-29 16:48:25 -05:00
}
} )
}
}
2023-11-01 17:20:25 -04:00
2024-06-28 23:17:31 -04:00
func TestUpdateNetmapDeltaAutoExitNode ( t * testing . T ) {
2025-07-03 11:50:27 -05:00
peer1 := makePeer ( 1 , withCap ( 26 ) , withSuggest ( ) , withOnline ( true ) , withExitRoutes ( ) )
peer2 := makePeer ( 2 , withCap ( 26 ) , withSuggest ( ) , withOnline ( true ) , withExitRoutes ( ) )
2024-06-28 23:17:31 -04:00
derpMap := & tailcfg . DERPMap {
Regions : map [ int ] * tailcfg . DERPRegion {
1 : {
Nodes : [ ] * tailcfg . DERPNode {
{
Name : "t1" ,
RegionID : 1 ,
} ,
} ,
} ,
2 : {
Nodes : [ ] * tailcfg . DERPNode {
{
Name : "t2" ,
RegionID : 2 ,
} ,
} ,
} ,
} ,
}
report := & netcheck . Report {
RegionLatency : map [ int ] time . Duration {
1 : 10 * time . Millisecond ,
2 : 5 * time . Millisecond ,
3 : 30 * time . Millisecond ,
} ,
PreferredDERP : 2 ,
}
tests := [ ] struct {
2025-01-03 14:30:02 -08:00
name string
lastSuggestedExitNode tailcfg . StableNodeID
netmap * netmap . NetworkMap
muts [ ] * tailcfg . PeerChange
exitNodeIDWant tailcfg . StableNodeID
report * netcheck . Report
2024-06-28 23:17:31 -04:00
} {
{
2025-01-03 14:30:02 -08:00
// selected auto exit node goes offline
2025-07-03 11:50:27 -05:00
name : "exit-node-goes-offline" ,
// PreferredDERP is 2, and it's also the region with the lowest latency.
// So, peer2 should be selected as the exit node.
lastSuggestedExitNode : peer2 . StableID ( ) ,
2024-06-28 23:17:31 -04:00
netmap : & netmap . NetworkMap {
Peers : [ ] tailcfg . NodeView {
peer1 ,
peer2 ,
} ,
DERPMap : derpMap ,
} ,
muts : [ ] * tailcfg . PeerChange {
{
NodeID : 1 ,
2025-07-03 11:50:27 -05:00
Online : ptr . To ( true ) ,
2024-06-28 23:17:31 -04:00
} ,
{
NodeID : 2 ,
2025-07-03 11:50:27 -05:00
Online : ptr . To ( false ) , // the selected exit node goes offline
2024-06-28 23:17:31 -04:00
} ,
} ,
2025-07-03 11:50:27 -05:00
exitNodeIDWant : peer1 . StableID ( ) ,
2025-01-03 14:30:02 -08:00
report : report ,
2024-06-28 23:17:31 -04:00
} ,
{
2025-01-03 14:30:02 -08:00
// other exit node goes offline doesn't change selected auto exit node that's still online
name : "other-node-goes-offline" ,
2024-06-28 23:17:31 -04:00
lastSuggestedExitNode : peer2 . StableID ( ) ,
netmap : & netmap . NetworkMap {
Peers : [ ] tailcfg . NodeView {
peer1 ,
peer2 ,
} ,
DERPMap : derpMap ,
} ,
muts : [ ] * tailcfg . PeerChange {
{
NodeID : 1 ,
2025-07-03 11:50:27 -05:00
Online : ptr . To ( false ) , // a different exit node goes offline
2024-06-28 23:17:31 -04:00
} ,
{
NodeID : 2 ,
Online : ptr . To ( true ) ,
} ,
} ,
2025-01-03 14:30:02 -08:00
exitNodeIDWant : peer2 . StableID ( ) ,
report : report ,
2024-06-28 23:17:31 -04:00
} ,
}
2024-10-08 10:50:14 -05:00
syspolicy . RegisterWellKnownSettingsForTest ( t )
policyStore := source . NewTestStoreOf ( t , source . TestSettingOf (
syspolicy . ExitNodeID , "auto:any" ,
) )
syspolicy . MustRegisterStoreForTest ( t , "TestStore" , setting . DeviceScope , policyStore )
2024-06-28 23:17:31 -04:00
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
b := newTestLocalBackend ( t )
2025-04-24 21:54:48 -05:00
b . currentNode ( ) . SetNetMap ( tt . netmap )
2024-06-28 23:17:31 -04:00
b . lastSuggestedExitNode = tt . lastSuggestedExitNode
b . sys . MagicSock . Get ( ) . SetLastNetcheckReportForTest ( b . ctx , tt . report )
b . SetPrefsForTest ( b . pm . CurrentPrefs ( ) . AsStruct ( ) )
2025-01-03 14:30:02 -08:00
allDone := make ( chan bool , 1 )
defer b . goTracker . AddDoneCallback ( func ( ) {
b . mu . Lock ( )
defer b . mu . Unlock ( )
if b . goTracker . RunningGoroutines ( ) > 0 {
return
}
select {
case allDone <- true :
default :
}
} ) ( )
2024-06-28 23:17:31 -04:00
someTime := time . Unix ( 123 , 0 )
muts , ok := netmap . MutationsFromMapResponse ( & tailcfg . MapResponse {
PeersChangedPatch : tt . muts ,
} , someTime )
if ! ok {
t . Fatal ( "netmap.MutationsFromMapResponse failed" )
}
2025-01-03 14:30:02 -08:00
2024-06-28 23:17:31 -04:00
if b . pm . prefs . ExitNodeID ( ) != tt . lastSuggestedExitNode {
t . Fatalf ( "did not set exit node ID to last suggested exit node despite auto policy" )
}
2025-01-03 14:30:02 -08:00
was := b . goTracker . StartedGoroutines ( )
2024-06-28 23:17:31 -04:00
got := b . UpdateNetmapDelta ( muts )
2025-01-03 14:30:02 -08:00
if ! got {
t . Error ( "got false from UpdateNetmapDelta" )
}
startedGoroutine := b . goTracker . StartedGoroutines ( ) != was
wantChange := tt . exitNodeIDWant != tt . lastSuggestedExitNode
if startedGoroutine != wantChange {
t . Errorf ( "got startedGoroutine %v, want %v" , startedGoroutine , wantChange )
}
if startedGoroutine {
select {
case <- time . After ( 5 * time . Second ) :
t . Fatal ( "timed out waiting for goroutine to finish" )
case <- allDone :
}
2024-06-28 23:17:31 -04:00
}
2025-01-03 14:30:02 -08:00
b . mu . Lock ( )
gotExitNode := b . pm . prefs . ExitNodeID ( )
b . mu . Unlock ( )
if gotExitNode != tt . exitNodeIDWant {
t . Fatalf ( "exit node ID after UpdateNetmapDelta = %v; want %v" , gotExitNode , tt . exitNodeIDWant )
2024-06-28 23:17:31 -04:00
}
} )
}
}
// TestAutoExitNodeSetNetInfoCallback verifies that when the ExitNodeID
// syspolicy is "auto:any", a NetInfo update delivered via setNetInfo
// re-evaluates the suggested exit node and rewrites prefs: the backend
// starts pinned to peer1 and must switch to peer2 once the netcheck
// report makes region 2 (peer2's DERP home) the preferred region.
func TestAutoExitNodeSetNetInfoCallback(t *testing.T) {
	b := newTestLocalBackend(t)
	hi := hostinfo.New()
	ni := tailcfg.NetInfo{LinkType: "wired"}
	hi.NetInfo = &ni
	b.hostinfo = hi
	k := key.NewMachine()
	var cc *mockControl
	opts := controlclient.Options{
		ServerURL: "https://example.com",
		GetMachinePrivateKey: func() (key.MachinePrivate, error) {
			return k, nil
		},
		Dialer: tsdial.NewDialer(netmon.NewStatic()),
		Logf:   b.logf,
	}
	cc = newClient(t, opts)
	b.cc = cc
	// Force the "auto:any" exit node policy for the duration of the test.
	syspolicy.RegisterWellKnownSettingsForTest(t)
	policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
		syspolicy.ExitNodeID, "auto:any",
	))
	syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
	// Two candidate exit nodes in different DERP regions (3 and 2).
	peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes())
	peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes())
	selfNode := tailcfg.Node{
		Addresses: []netip.Prefix{
			netip.MustParsePrefix("100.64.1.1/32"),
			netip.MustParsePrefix("fe70::1/128"),
		},
		HomeDERP: 2,
	}
	defaultDERPMap := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "t1",
						RegionID: 1,
					},
				},
			},
			2: {
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "t2",
						RegionID: 2,
					},
				},
			},
			3: {
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "t3",
						RegionID: 3,
					},
				},
			},
		},
	}
	b.currentNode().SetNetMap(&netmap.NetworkMap{
		SelfNode: selfNode.View(),
		Peers: []tailcfg.NodeView{
			peer1,
			peer2,
		},
		DERPMap: defaultDERPMap,
	})
	// Seed the backend with peer1 as the last suggestion, then apply prefs
	// so the auto policy resolves to it.
	b.lastSuggestedExitNode = peer1.StableID()
	b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct())
	if eid := b.Prefs().ExitNodeID(); eid != peer1.StableID() {
		t.Errorf("got initial exit node %v, want %v", eid, peer1.StableID())
	}
	b.refreshAutoExitNode = true
	// Region 2 now has the lowest latency and is preferred, so the next
	// suggestion should pick peer2 (whose DERP home is region 2).
	b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, &netcheck.Report{
		RegionLatency: map[int]time.Duration{
			1: 10 * time.Millisecond,
			2: 5 * time.Millisecond,
			3: 30 * time.Millisecond,
		},
		PreferredDERP: 2,
	})
	// Deliver the NetInfo update; this is the code path under test.
	b.setNetInfo(&ni)
	if eid := b.Prefs().ExitNodeID(); eid != peer2.StableID() {
		t.Errorf("got final exit node %v, want %v", eid, peer2.StableID())
	}
}
2024-07-12 11:06:07 -04:00
// TestSetControlClientStatusAutoExitNode verifies that a netmap update
// delivered through SetControlClientStatus re-runs auto exit node
// selection: when the currently-selected auto exit node (peer2) goes
// offline, the backend must fail over to the remaining candidate (peer1).
func TestSetControlClientStatusAutoExitNode(t *testing.T) {
	peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey())
	peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey())
	derpMap := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "t1",
						RegionID: 1,
					},
				},
			},
			2: {
				Nodes: []*tailcfg.DERPNode{
					{
						Name:     "t2",
						RegionID: 2,
					},
				},
			},
		},
	}
	report := &netcheck.Report{
		RegionLatency: map[int]time.Duration{
			1: 10 * time.Millisecond,
			2: 5 * time.Millisecond,
			3: 30 * time.Millisecond,
		},
		PreferredDERP: 1,
	}
	nm := &netmap.NetworkMap{
		Peers: []tailcfg.NodeView{
			peer1,
			peer2,
		},
		DERPMap: derpMap,
	}
	b := newTestLocalBackend(t)
	// Force the "auto:any" exit node policy for the duration of the test.
	syspolicy.RegisterWellKnownSettingsForTest(t)
	policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
		syspolicy.ExitNodeID, "auto:any",
	))
	syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
	b.currentNode().SetNetMap(nm)
	// Peer 2 should be the initial exit node, as it's better than peer 1
	// in terms of latency and DERP region.
	b.lastSuggestedExitNode = peer2.StableID()
	b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report)
	b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct())
	// Same peer 2, but now marked offline in the updated netmap.
	offlinePeer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey())
	updatedNetmap := &netmap.NetworkMap{
		Peers: []tailcfg.NodeView{
			peer1,
			offlinePeer2,
		},
		DERPMap: derpMap,
	}
	b.SetControlClientStatus(b.cc, controlclient.Status{NetMap: updatedNetmap})
	// But now that peer 2 is offline, we should switch to peer 1.
	wantExitNode := peer1.StableID()
	gotExitNode := b.Prefs().ExitNodeID()
	if gotExitNode != wantExitNode {
		t.Errorf("did not switch exit nodes despite auto exit node going offline: got %q; want %q", gotExitNode, wantExitNode)
	}
}
2023-11-01 17:20:25 -04:00
// TestApplySysPolicy verifies that system policies (syspolicy) are applied
// on top of user preferences. Each case runs twice: once directly against
// applySysPolicyLocked ("unit") and once through the full
// SetControlClientStatus path ("status update"), which must yield the same
// effective prefs.
func TestApplySysPolicy(t *testing.T) {
	tests := []struct {
		name           string
		prefs          ipn.Prefs
		wantPrefs      ipn.Prefs
		wantAnyChange  bool // whether applySysPolicyLocked should report a change
		stringPolicies map[syspolicy.Key]string
	}{
		{
			name: "empty prefs without policies",
		},
		{
			// No policies configured: user prefs pass through untouched.
			name: "prefs set without policies",
			prefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: true,
				CorpDNS:                true,
				RouteAll:               true,
			},
			wantPrefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: true,
				CorpDNS:                true,
				RouteAll:               true,
			},
		},
		{
			// Policies force values onto otherwise-empty prefs.
			name: "empty prefs with policies",
			wantPrefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: true,
				CorpDNS:                true,
				RouteAll:               true,
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ControlURL:                "1",
				syspolicy.EnableIncomingConnections: "never",
				syspolicy.EnableServerMode:          "always",
				syspolicy.ExitNodeAllowLANAccess:    "always",
				syspolicy.EnableTailscaleDNS:        "always",
				syspolicy.EnableTailscaleSubnets:    "always",
			},
		},
		{
			// Policies agree with the user's prefs: no change reported.
			name: "prefs set with matching policies",
			prefs: ipn.Prefs{
				ControlURL:  "1",
				ShieldsUp:   true,
				ForceDaemon: true,
			},
			wantPrefs: ipn.Prefs{
				ControlURL:  "1",
				ShieldsUp:   true,
				ForceDaemon: true,
			},
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ControlURL:                "1",
				syspolicy.EnableIncomingConnections: "never",
				syspolicy.EnableServerMode:          "always",
				syspolicy.ExitNodeAllowLANAccess:    "never",
				syspolicy.EnableTailscaleDNS:        "never",
				syspolicy.EnableTailscaleSubnets:    "never",
			},
		},
		{
			// Policies contradict every user pref: policies win.
			name: "prefs set with conflicting policies",
			prefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: false,
				CorpDNS:                true,
				RouteAll:               false,
			},
			wantPrefs: ipn.Prefs{
				ControlURL:             "2",
				ShieldsUp:              false,
				ForceDaemon:            false,
				ExitNodeAllowLANAccess: true,
				CorpDNS:                false,
				RouteAll:               true,
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ControlURL:                "2",
				syspolicy.EnableIncomingConnections: "always",
				syspolicy.EnableServerMode:          "never",
				syspolicy.ExitNodeAllowLANAccess:    "always",
				syspolicy.EnableTailscaleDNS:        "never",
				syspolicy.EnableTailscaleSubnets:    "always",
			},
		},
		{
			// "user-decides" policies leave user prefs alone.
			name: "prefs set with neutral policies",
			prefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: false,
				CorpDNS:                true,
				RouteAll:               true,
			},
			wantPrefs: ipn.Prefs{
				ControlURL:             "1",
				ShieldsUp:              true,
				ForceDaemon:            true,
				ExitNodeAllowLANAccess: false,
				CorpDNS:                true,
				RouteAll:               true,
			},
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.EnableIncomingConnections: "user-decides",
				syspolicy.EnableServerMode:          "user-decides",
				syspolicy.ExitNodeAllowLANAccess:    "user-decides",
				syspolicy.EnableTailscaleDNS:        "user-decides",
				syspolicy.EnableTailscaleSubnets:    "user-decides",
			},
		},
		{
			name: "ControlURL",
			wantPrefs: ipn.Prefs{
				ControlURL: "set",
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ControlURL: "set",
			},
		},
		// The four AutoUpdate cases check that forcing one of the two
		// AutoUpdate sub-prefs (Check/Apply) never clobbers the other.
		{
			name: "enable AutoUpdate apply does not unset check",
			prefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(false),
				},
			},
			wantPrefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(true),
				},
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ApplyUpdates: "always",
			},
		},
		{
			name: "disable AutoUpdate apply does not unset check",
			prefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(true),
				},
			},
			wantPrefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(false),
				},
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.ApplyUpdates: "never",
			},
		},
		{
			name: "enable AutoUpdate check does not unset apply",
			prefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: false,
					Apply: opt.NewBool(true),
				},
			},
			wantPrefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(true),
				},
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.CheckUpdates: "always",
			},
		},
		{
			name: "disable AutoUpdate check does not unset apply",
			prefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: true,
					Apply: opt.NewBool(true),
				},
			},
			wantPrefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Check: false,
					Apply: opt.NewBool(true),
				},
			},
			wantAnyChange: true,
			stringPolicies: map[syspolicy.Key]string{
				syspolicy.CheckUpdates: "never",
			},
		},
	}

	syspolicy.RegisterWellKnownSettingsForTest(t)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Install this case's policies in a fresh test policy store.
			settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies))
			for p, v := range tt.stringPolicies {
				settings = append(settings, source.TestSettingOf(p, v))
			}
			policyStore := source.NewTestStoreOf(t, settings...)
			syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)

			t.Run("unit", func(t *testing.T) {
				prefs := tt.prefs.Clone()
				lb := newTestLocalBackend(t)
				gotAnyChange := lb.applySysPolicyLocked(prefs)
				// The change flag must agree with whether prefs actually changed.
				if gotAnyChange && prefs.Equals(&tt.prefs) {
					t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty())
				}
				if !gotAnyChange && !prefs.Equals(&tt.prefs) {
					t.Errorf("!anyChange but prefs changed from %v to %v", tt.prefs.Pretty(), prefs.Pretty())
				}
				if gotAnyChange != tt.wantAnyChange {
					t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantAnyChange)
				}
				if !prefs.Equals(&tt.wantPrefs) {
					t.Errorf("prefs=%v, want %v", prefs.Pretty(), tt.wantPrefs.Pretty())
				}
			})
			t.Run("status update", func(t *testing.T) {
				// Profile manager fills in blank ControlURL but it's not set
				// in most test cases to avoid cluttering them, so adjust for
				// that.
				usePrefs := tt.prefs.Clone()
				if usePrefs.ControlURL == "" {
					usePrefs.ControlURL = ipn.DefaultControlURL
				}
				wantPrefs := tt.wantPrefs.Clone()
				if wantPrefs.ControlURL == "" {
					wantPrefs.ControlURL = ipn.DefaultControlURL
				}

				pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
				pm.prefs = usePrefs.View()

				b := newTestBackend(t)
				b.mu.Lock()
				b.pm = pm
				b.mu.Unlock()

				// An empty status still triggers policy application.
				b.SetControlClientStatus(b.cc, controlclient.Status{})
				if !b.Prefs().Equals(wantPrefs.View()) {
					t.Errorf("prefs=%v, want %v", b.Prefs().Pretty(), wantPrefs.Pretty())
				}
			})
		})
	}
}
2023-12-05 17:16:34 -05:00
// TestPreferencePolicyInfo checks every entry in preferencePolicies (the
// table mapping syspolicy keys to boolean prefs) against a matrix of
// policy values: forced on ("always"), forced off ("never"), neutral
// ("user-decides"/blank), unset (ErrNoSuchKey), and store errors. In all
// non-forced cases the user's initial value must survive unchanged.
func TestPreferencePolicyInfo(t *testing.T) {
	tests := []struct {
		name         string
		initialValue bool
		wantValue    bool
		wantChange   bool // whether applySysPolicyLocked should report a change
		policyValue  string
		policyError  error
	}{
		{
			name:         "force enable modify",
			initialValue: false,
			wantValue:    true,
			wantChange:   true,
			policyValue:  "always",
		},
		{
			name:         "force enable unchanged",
			initialValue: true,
			wantValue:    true,
			policyValue:  "always",
		},
		{
			name:         "force disable modify",
			initialValue: true,
			wantValue:    false,
			wantChange:   true,
			policyValue:  "never",
		},
		{
			name:         "force disable unchanged",
			initialValue: false,
			wantValue:    false,
			policyValue:  "never",
		},
		{
			name:         "unforced enabled",
			initialValue: true,
			wantValue:    true,
			policyValue:  "user-decides",
		},
		{
			name:         "unforced disabled",
			initialValue: false,
			wantValue:    false,
			policyValue:  "user-decides",
		},
		{
			name:         "blank enabled",
			initialValue: true,
			wantValue:    true,
			policyValue:  "",
		},
		{
			name:         "blank disabled",
			initialValue: false,
			wantValue:    false,
			policyValue:  "",
		},
		{
			name:         "unset enabled",
			initialValue: true,
			wantValue:    true,
			policyError:  syspolicy.ErrNoSuchKey,
		},
		{
			name:         "unset disabled",
			initialValue: false,
			wantValue:    false,
			policyError:  syspolicy.ErrNoSuchKey,
		},
		{
			// A failing policy store must not modify the pref.
			name:         "error enabled",
			initialValue: true,
			wantValue:    true,
			policyError:  errors.New("test error"),
		},
		{
			name:         "error disabled",
			initialValue: false,
			wantValue:    false,
			policyError:  errors.New("test error"),
		},
	}

	syspolicy.RegisterWellKnownSettingsForTest(t)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Apply the same scenario to each known preference policy key.
			for _, pp := range preferencePolicies {
				t.Run(string(pp.key), func(t *testing.T) {
					s := source.TestSetting[string]{
						Key:   pp.key,
						Error: tt.policyError,
						Value: tt.policyValue,
					}
					policyStore := source.NewTestStoreOf(t, s)
					syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)

					prefs := defaultPrefs.AsStruct()
					pp.set(prefs, tt.initialValue)

					lb := newTestLocalBackend(t)
					gotAnyChange := lb.applySysPolicyLocked(prefs)

					if gotAnyChange != tt.wantChange {
						t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange)
					}
					got := pp.get(prefs.View())
					if got != tt.wantValue {
						t.Errorf("pref=%v, want %v", got, tt.wantValue)
					}
				})
			}
		})
	}
}
2023-12-18 16:57:03 -06:00
// TestOnTailnetDefaultAutoUpdate verifies how the tailnet-wide auto-update
// default interacts with the local AutoUpdate.Apply pref: the default is
// adopted only when the pref is unset, an explicit local choice always
// wins, and nodes running in a container never adopt the default.
func TestOnTailnetDefaultAutoUpdate(t *testing.T) {
	tests := []struct {
		before, after  opt.Bool
		container      opt.Bool
		tailnetDefault bool
	}{
		// Unset pref ("" or "unset"): adopt the tailnet default.
		{
			before:         opt.Bool(""),
			tailnetDefault: true,
			after:          opt.NewBool(true),
		},
		{
			before:         opt.Bool(""),
			tailnetDefault: false,
			after:          opt.NewBool(false),
		},
		{
			before:         opt.Bool("unset"),
			tailnetDefault: true,
			after:          opt.NewBool(true),
		},
		{
			before:         opt.Bool("unset"),
			tailnetDefault: false,
			after:          opt.NewBool(false),
		},
		// Explicitly set pref: the tailnet default is ignored.
		{
			before:         opt.NewBool(false),
			tailnetDefault: true,
			after:          opt.NewBool(false),
		},
		{
			before:         opt.NewBool(true),
			tailnetDefault: false,
			after:          opt.NewBool(true),
		},
		// Containers never auto-adopt the default, even when unset.
		{
			before:         opt.Bool(""),
			container:      opt.NewBool(true),
			tailnetDefault: true,
			after:          opt.Bool(""),
		},
		{
			before:         opt.NewBool(false),
			container:      opt.NewBool(true),
			tailnetDefault: true,
			after:          opt.NewBool(false),
		},
		{
			before:         opt.NewBool(true),
			container:      opt.NewBool(true),
			tailnetDefault: false,
			after:          opt.NewBool(true),
		},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("before=%s,after=%s", tt.before, tt.after), func(t *testing.T) {
			b := newTestBackend(t)
			b.hostinfo = hostinfo.New()
			b.hostinfo.Container = tt.container
			p := ipn.NewPrefs()
			p.AutoUpdate.Apply = tt.before
			// Bypass the permission check: the test has no acting user.
			if err := b.pm.setPrefsNoPermCheck(p.View()); err != nil {
				t.Fatal(err)
			}
			b.onTailnetDefaultAutoUpdate(tt.tailnetDefault)
			want := tt.after
			// On platforms that don't support auto-update we can never
			// transition to auto-updates being enabled. The value should
			// remain unchanged after onTailnetDefaultAutoUpdate.
			if !clientupdate.CanAutoUpdate() {
				want = tt.before
			}
			if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want {
				t.Errorf("got: %q, want %q", got, want)
			}
		})
	}
}
2024-02-28 11:44:42 -06:00
func TestTCPHandlerForDst ( t * testing . T ) {
b := newTestBackend ( t )
2025-01-20 12:02:53 -05:00
tests := [ ] struct {
desc string
dst string
intercept bool
} {
{
desc : "intercept port 80 (Web UI) on quad100 IPv4" ,
dst : "100.100.100.100:80" ,
intercept : true ,
} ,
{
desc : "intercept port 80 (Web UI) on quad100 IPv6" ,
dst : "[fd7a:115c:a1e0::53]:80" ,
intercept : true ,
} ,
{
desc : "don't intercept port 80 on local ip" ,
dst : "100.100.103.100:80" ,
intercept : false ,
} ,
{
desc : "intercept port 8080 (Taildrive) on quad100 IPv4" ,
dst : "[fd7a:115c:a1e0::53]:8080" ,
intercept : true ,
} ,
{
desc : "don't intercept port 8080 on local ip" ,
dst : "100.100.103.100:8080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on quad100 IPv4" ,
dst : "100.100.100.100:9080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on quad100 IPv6" ,
dst : "[fd7a:115c:a1e0::53]:9080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on local ip" ,
dst : "100.100.103.100:9080" ,
intercept : false ,
} ,
}
for _ , tt := range tests {
t . Run ( tt . dst , func ( t * testing . T ) {
t . Log ( tt . desc )
src := netip . MustParseAddrPort ( "100.100.102.100:51234" )
h , _ := b . TCPHandlerForDst ( src , netip . MustParseAddrPort ( tt . dst ) )
if ! tt . intercept && h != nil {
t . Error ( "intercepted traffic we shouldn't have" )
} else if tt . intercept && h == nil {
t . Error ( "failed to intercept traffic we should have" )
}
} )
}
}
func TestTCPHandlerForDstWithVIPService ( t * testing . T ) {
b := newTestBackend ( t )
svcIPMap := tailcfg . ServiceIPMappings {
"svc:foo" : [ ] netip . Addr {
netip . MustParseAddr ( "100.101.101.101" ) ,
netip . MustParseAddr ( "fd7a:115c:a1e0:ab12:4843:cd96:6565:6565" ) ,
} ,
"svc:bar" : [ ] netip . Addr {
netip . MustParseAddr ( "100.99.99.99" ) ,
netip . MustParseAddr ( "fd7a:115c:a1e0:ab12:4843:cd96:626b:628b" ) ,
} ,
"svc:baz" : [ ] netip . Addr {
netip . MustParseAddr ( "100.133.133.133" ) ,
netip . MustParseAddr ( "fd7a:115c:a1e0:ab12:4843:cd96:8585:8585" ) ,
} ,
}
svcIPMapJSON , err := json . Marshal ( svcIPMap )
if err != nil {
t . Fatal ( err )
}
b . setNetMapLocked (
& netmap . NetworkMap {
SelfNode : ( & tailcfg . Node {
Name : "example.ts.net" ,
CapMap : tailcfg . NodeCapMap {
tailcfg . NodeAttrServiceHost : [ ] tailcfg . RawMessage { tailcfg . RawMessage ( svcIPMapJSON ) } ,
} ,
} ) . View ( ) ,
2025-01-24 19:41:30 -08:00
UserProfiles : map [ tailcfg . UserID ] tailcfg . UserProfileView {
tailcfg . UserID ( 1 ) : ( & tailcfg . UserProfile {
2025-01-20 12:02:53 -05:00
LoginName : "someone@example.com" ,
DisplayName : "Some One" ,
ProfilePicURL : "https://example.com/photo.jpg" ,
2025-01-24 19:41:30 -08:00
} ) . View ( ) ,
2025-01-20 12:02:53 -05:00
} ,
} ,
)
err = b . setServeConfigLocked (
& ipn . ServeConfig {
2025-01-21 17:07:34 -05:00
Services : map [ tailcfg . ServiceName ] * ipn . ServiceConfig {
2025-01-20 12:02:53 -05:00
"svc:foo" : {
TCP : map [ uint16 ] * ipn . TCPPortHandler {
882 : { HTTP : true } ,
883 : { HTTPS : true } ,
} ,
Web : map [ ipn . HostPort ] * ipn . WebServerConfig {
"foo.example.ts.net:882" : {
Handlers : map [ string ] * ipn . HTTPHandler {
"/" : { Proxy : "http://127.0.0.1:3000" } ,
} ,
} ,
"foo.example.ts.net:883" : {
Handlers : map [ string ] * ipn . HTTPHandler {
"/" : { Text : "test" } ,
} ,
} ,
} ,
} ,
"svc:bar" : {
TCP : map [ uint16 ] * ipn . TCPPortHandler {
990 : { TCPForward : "127.0.0.1:8443" } ,
991 : { TCPForward : "127.0.0.1:5432" , TerminateTLS : "bar.test.ts.net" } ,
} ,
} ,
"svc:qux" : {
TCP : map [ uint16 ] * ipn . TCPPortHandler {
600 : { HTTPS : true } ,
} ,
Web : map [ ipn . HostPort ] * ipn . WebServerConfig {
"qux.example.ts.net:600" : {
Handlers : map [ string ] * ipn . HTTPHandler {
"/" : { Text : "qux" } ,
} ,
} ,
} ,
} ,
} ,
} ,
"" ,
)
if err != nil {
t . Fatal ( err )
}
2024-02-28 11:44:42 -06:00
tests := [ ] struct {
desc string
dst string
intercept bool
} {
{
desc : "intercept port 80 (Web UI) on quad100 IPv4" ,
dst : "100.100.100.100:80" ,
intercept : true ,
} ,
{
desc : "intercept port 80 (Web UI) on quad100 IPv6" ,
dst : "[fd7a:115c:a1e0::53]:80" ,
intercept : true ,
} ,
{
desc : "don't intercept port 80 on local ip" ,
dst : "100.100.103.100:80" ,
intercept : false ,
} ,
{
2024-04-03 10:09:58 -07:00
desc : "intercept port 8080 (Taildrive) on quad100 IPv4" ,
2024-02-28 11:44:42 -06:00
dst : "100.100.100.100:8080" ,
intercept : true ,
} ,
{
2024-04-03 10:09:58 -07:00
desc : "intercept port 8080 (Taildrive) on quad100 IPv6" ,
2024-02-28 11:44:42 -06:00
dst : "[fd7a:115c:a1e0::53]:8080" ,
intercept : true ,
} ,
{
desc : "don't intercept port 8080 on local ip" ,
dst : "100.100.103.100:8080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on quad100 IPv4" ,
dst : "100.100.100.100:9080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on quad100 IPv6" ,
dst : "[fd7a:115c:a1e0::53]:9080" ,
intercept : false ,
} ,
{
desc : "don't intercept port 9080 on local ip" ,
dst : "100.100.103.100:9080" ,
intercept : false ,
} ,
2025-01-20 12:02:53 -05:00
// VIP service destinations
{
desc : "intercept port 882 (HTTP) on service foo IPv4" ,
dst : "100.101.101.101:882" ,
intercept : true ,
} ,
{
desc : "intercept port 882 (HTTP) on service foo IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:882" ,
intercept : true ,
} ,
{
desc : "intercept port 883 (HTTPS) on service foo IPv4" ,
dst : "100.101.101.101:883" ,
intercept : true ,
} ,
{
desc : "intercept port 883 (HTTPS) on service foo IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:883" ,
intercept : true ,
} ,
{
desc : "intercept port 990 (TCPForward) on service bar IPv4" ,
dst : "100.99.99.99:990" ,
intercept : true ,
} ,
{
desc : "intercept port 990 (TCPForward) on service bar IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:990" ,
intercept : true ,
} ,
{
desc : "intercept port 991 (TCPForward with TerminateTLS) on service bar IPv4" ,
dst : "100.99.99.99:990" ,
intercept : true ,
} ,
{
desc : "intercept port 991 (TCPForward with TerminateTLS) on service bar IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:990" ,
intercept : true ,
} ,
{
desc : "don't intercept port 4444 on service foo IPv4" ,
dst : "100.101.101.101:4444" ,
intercept : false ,
} ,
{
desc : "don't intercept port 4444 on service foo IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:4444" ,
intercept : false ,
} ,
{
desc : "don't intercept port 600 on unknown service IPv4" ,
dst : "100.22.22.22:883" ,
intercept : false ,
} ,
{
desc : "don't intercept port 600 on unknown service IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:883" ,
intercept : false ,
} ,
{
desc : "don't intercept port 600 (HTTPS) on service baz IPv4" ,
dst : "100.133.133.133:600" ,
intercept : false ,
} ,
{
desc : "don't intercept port 600 (HTTPS) on service baz IPv6" ,
dst : "[fd7a:115c:a1e0:ab12:4843:cd96:8585:8585]:600" ,
intercept : false ,
} ,
2024-02-28 11:44:42 -06:00
}
for _ , tt := range tests {
t . Run ( tt . dst , func ( t * testing . T ) {
t . Log ( tt . desc )
src := netip . MustParseAddrPort ( "100.100.102.100:51234" )
h , _ := b . TCPHandlerForDst ( src , netip . MustParseAddrPort ( tt . dst ) )
if ! tt . intercept && h != nil {
t . Error ( "intercepted traffic we shouldn't have" )
} else if tt . intercept && h == nil {
t . Error ( "failed to intercept traffic we should have" )
}
} )
}
}
2024-03-08 10:43:32 -06:00
2024-04-03 10:09:58 -07:00
// TestDriveManageShares exercises the Taildrive share-management API on
// LocalBackend — DriveSetShare (add/replace), DriveRemoveShare, and
// DriveRenameShare — covering name normalization and sorted insertion,
// error cases (invalid names, missing or duplicate shares, Taildrive
// disabled), and that each successful mutation is pushed to notification
// watchers as a DriveShares update.
func TestDriveManageShares(t *testing.T) {
	tests := []struct {
		name     string
		disabled bool // if true, the node is not granted the Taildrive share capability
		existing []*drive.Share
		add      *drive.Share
		remove   string
		rename   [2]string
		expect   any // either the expected []*drive.Share or the expected error
	}{
		{
			// Share names are normalized (trimmed, lowercased) and the
			// list is kept sorted; " E " lands at the end.
			name: "append",
			existing: []*drive.Share{
				{Name: "b"},
				{Name: "d"},
			},
			add: &drive.Share{Name: " E "},
			expect: []*drive.Share{
				{Name: "b"},
				{Name: "d"},
				{Name: "e"},
			},
		},
		{
			name: "prepend",
			existing: []*drive.Share{
				{Name: "b"},
				{Name: "d"},
			},
			add: &drive.Share{Name: " A "},
			expect: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
				{Name: "d"},
			},
		},
		{
			name: "insert",
			existing: []*drive.Share{
				{Name: "b"},
				{Name: "d"},
			},
			add: &drive.Share{Name: " C "},
			expect: []*drive.Share{
				{Name: "b"},
				{Name: "c"},
				{Name: "d"},
			},
		},
		{
			// Adding an existing (normalized) name replaces that share.
			name: "replace",
			existing: []*drive.Share{
				{Name: "b", Path: "i"},
				{Name: "d"},
			},
			add: &drive.Share{Name: " B ", Path: "ii"},
			expect: []*drive.Share{
				{Name: "b", Path: "ii"},
				{Name: "d"},
			},
		},
		{
			name:   "add_bad_name",
			add:    &drive.Share{Name: "$"},
			expect: drive.ErrInvalidShareName,
		},
		{
			name:     "add_disabled",
			disabled: true,
			add:      &drive.Share{Name: "a"},
			expect:   drive.ErrDriveNotEnabled,
		},
		{
			name: "remove",
			existing: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
				{Name: "c"},
			},
			remove: "b",
			expect: []*drive.Share{
				{Name: "a"},
				{Name: "c"},
			},
		},
		{
			name: "remove_non_existing",
			existing: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
				{Name: "c"},
			},
			remove: "D",
			expect: os.ErrNotExist,
		},
		{
			name:     "remove_disabled",
			disabled: true,
			remove:   "b",
			expect:   drive.ErrDriveNotEnabled,
		},
		{
			// Rename also normalizes the new name and re-sorts.
			name: "rename",
			existing: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
			},
			rename: [2]string{"a", " C "},
			expect: []*drive.Share{
				{Name: "b"},
				{Name: "c"},
			},
		},
		{
			name: "rename_not_exist",
			existing: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
			},
			rename: [2]string{"d", "c"},
			expect: os.ErrNotExist,
		},
		{
			name: "rename_exists",
			existing: []*drive.Share{
				{Name: "a"},
				{Name: "b"},
			},
			rename: [2]string{"a", "b"},
			expect: os.ErrExist,
		},
		{
			name:   "rename_bad_name",
			rename: [2]string{"a", "$"},
			expect: drive.ErrInvalidShareName,
		},
		{
			name:     "rename_disabled",
			disabled: true,
			rename:   [2]string{"a", "c"},
			expect:   drive.ErrDriveNotEnabled,
		},
	}

	// Force share-as-self for the whole test so no per-share user
	// impersonation is attempted; restored on cleanup.
	drive.DisallowShareAs = true
	t.Cleanup(func() {
		drive.DisallowShareAs = false
	})

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b := newTestBackend(t)
			b.mu.Lock()
			if tt.existing != nil {
				b.driveSetSharesLocked(tt.existing)
			}
			if !tt.disabled {
				// Grant the self node the Taildrive share capability and
				// install a remote file system so sharing is enabled.
				nm := ptr.To(*b.currentNode().NetMap())
				self := nm.SelfNode.AsStruct()
				self.CapMap = tailcfg.NodeCapMap{tailcfg.NodeAttrsTaildriveShare: nil}
				nm.SelfNode = self.View()
				b.currentNode().SetNetMap(nm)
				b.sys.Set(driveimpl.NewFileSystemForRemote(b.logf))
			}
			b.mu.Unlock()
			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			t.Cleanup(cancel)

			// Watch notifications so we can observe the DriveShares update
			// pushed after a successful mutation. The 1-buffered channel
			// plus non-blocking send keeps only the latest snapshot.
			result := make(chan views.SliceView[*drive.Share, drive.ShareView], 1)
			var wg sync.WaitGroup
			wg.Add(1)
			go b.WatchNotifications(
				ctx,
				0,
				func() { wg.Done() },
				func(n *ipn.Notify) bool {
					select {
					case result <- n.DriveShares:
					default:
						// drop: only the most recent update matters
					}
					return false
				},
			)
			// Wait until the watcher is registered before mutating.
			wg.Wait()
			var err error
			switch {
			case tt.add != nil:
				err = b.DriveSetShare(tt.add)
			case tt.remove != "":
				err = b.DriveRemoveShare(tt.remove)
			default:
				err = b.DriveRenameShare(tt.rename[0], tt.rename[1])
			}
			switch e := tt.expect.(type) {
			case error:
				if !errors.Is(err, e) {
					t.Errorf("expected error, want: %v got: %v", e, err)
				}
			case []*drive.Share:
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				} else {
					// Compare the notified share list with the expectation
					// via their JSON encodings.
					r := <-result
					got, err := json.MarshalIndent(r, "", " ")
					if err != nil {
						t.Fatalf("can't marshal got: %v", err)
					}
					want, err := json.MarshalIndent(e, "", " ")
					if err != nil {
						t.Fatalf("can't marshal want: %v", err)
					}
					if diff := cmp.Diff(string(got), string(want)); diff != "" {
						t.Errorf("wrong shares; (-got+want):%v", diff)
					}
				}
			}
		})
	}
}
2024-03-13 17:31:07 -07:00
// TestValidPopBrowserURL checks which popBrowserURL values the backend
// accepts for a given control URL. Per the table below, SaaS control
// servers only allow Tailscale-owned destinations, custom control servers
// allow any host, and an https control URL never allows an http popup.
func TestValidPopBrowserURL(t *testing.T) {
	b := newTestBackend(t)
	for _, tc := range []struct {
		desc          string
		controlURL    string
		popBrowserURL string
		want          bool
	}{
		{"saas_login", "https://login.tailscale.com", "https://login.tailscale.com/a/foo", true},
		{"saas_controlplane", "https://controlplane.tailscale.com", "https://controlplane.tailscale.com/a/foo", true},
		{"saas_root", "https://login.tailscale.com", "https://tailscale.com/", true},
		{"saas_bad_hostname", "https://login.tailscale.com", "https://example.com/a/foo", false},
		{"localhost", "http://localhost", "http://localhost/a/foo", true},
		{"custom_control_url_https", "https://example.com", "https://example.com/a/foo", true},
		{"custom_control_url_https_diff_domain", "https://example.com", "https://other.com/a/foo", true},
		{"custom_control_url_http", "http://example.com", "http://example.com/a/foo", true},
		{"custom_control_url_http_diff_domain", "http://example.com", "http://other.com/a/foo", true},
		{"bad_scheme", "https://example.com", "http://example.com/a/foo", false},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			// Point the backend at the control URL under test.
			mp := &ipn.MaskedPrefs{
				ControlURLSet: true,
				Prefs: ipn.Prefs{
					ControlURL: tc.controlURL,
				},
			}
			if _, err := b.EditPrefs(mp); err != nil {
				t.Fatal(err)
			}
			if got := b.validPopBrowserURL(tc.popBrowserURL); got != tc.want {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}
2024-03-26 13:14:43 -07:00
func TestRoundTraffic ( t * testing . T ) {
tests := [ ] struct {
name string
bytes int64
want float64
} {
{ name : "under 5 bytes" , bytes : 4 , want : 4 } ,
{ name : "under 1000 bytes" , bytes : 987 , want : 990 } ,
{ name : "under 10_000 bytes" , bytes : 8875 , want : 8900 } ,
{ name : "under 100_000 bytes" , bytes : 77777 , want : 78000 } ,
{ name : "under 1_000_000 bytes" , bytes : 666523 , want : 670000 } ,
{ name : "under 10_000_000 bytes" , bytes : 22556677 , want : 23000000 } ,
{ name : "under 1_000_000_000 bytes" , bytes : 1234234234 , want : 1200000000 } ,
{ name : "under 1_000_000_000 bytes" , bytes : 123423423499 , want : 123400000000 } ,
}
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
if result := roundTraffic ( tt . bytes ) ; result != tt . want {
t . Errorf ( "unexpected rounding got %v want %v" , result , tt . want )
}
} )
2024-04-14 19:47:32 -07:00
}
}
2024-03-26 13:14:43 -07:00
2024-04-14 19:47:32 -07:00
// SetPrefsForTest sets the backend's preferences to newp for use in tests.
// It panics if newp is nil.
//
// NOTE(review): setPrefsLockedOnEntry also receives the unlock func,
// presumably so it can release b.mu itself partway through; confirm the
// deferred unlock remains safe (idempotent) in that case.
func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) {
	if newp == nil {
		panic("SetPrefsForTest got nil prefs")
	}
	unlock := b.lockAndGetUnlock()
	defer unlock()
	b.setPrefsLockedOnEntry(newp, unlock)
}
2024-04-15 18:14:20 -04:00
2024-05-31 23:21:55 -04:00
type peerOptFunc func ( * tailcfg . Node )
func makePeer ( id tailcfg . NodeID , opts ... peerOptFunc ) tailcfg . NodeView {
node := & tailcfg . Node {
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
ID : id ,
Key : makeNodeKeyFromID ( id ) ,
DiscoKey : makeDiscoKeyFromID ( id ) ,
StableID : tailcfg . StableNodeID ( fmt . Sprintf ( "stable%d" , id ) ) ,
Name : fmt . Sprintf ( "peer%d" , id ) ,
Online : ptr . To ( true ) ,
MachineAuthorized : true ,
HomeDERP : int ( id ) ,
2024-05-31 23:21:55 -04:00
}
for _ , opt := range opts {
opt ( node )
}
return node . View ( )
}
// withName overrides the peer's name.
func withName(name string) peerOptFunc {
	return func(node *tailcfg.Node) { node.Name = name }
}
func withDERP ( region int ) peerOptFunc {
return func ( n * tailcfg . Node ) {
2025-01-14 10:19:52 -08:00
n . HomeDERP = region
2024-05-31 23:21:55 -04:00
}
}
func withoutDERP ( ) peerOptFunc {
return func ( n * tailcfg . Node ) {
2025-01-14 10:19:52 -08:00
n . HomeDERP = 0
2024-05-31 23:21:55 -04:00
}
}
// withLocation sets the peer's Hostinfo.Location, preserving any Hostinfo
// fields that were already populated.
func withLocation(loc tailcfg.LocationView) peerOptFunc {
	return func(node *tailcfg.Node) {
		hi := new(tailcfg.Hostinfo)
		if node.Hostinfo.Valid() {
			hi = node.Hostinfo.AsStruct()
		}
		hi.Location = loc.AsStruct()
		node.Hostinfo = hi.View()
	}
}
2025-07-10 22:15:55 -07:00
// withLocationPriority sets the priority score on the peer's
// Hostinfo.Location, creating the Hostinfo and Location on demand while
// preserving any existing fields.
func withLocationPriority(pri int) peerOptFunc {
	return func(node *tailcfg.Node) {
		hi := new(tailcfg.Hostinfo)
		if node.Hostinfo.Valid() {
			hi = node.Hostinfo.AsStruct()
		}
		if hi.Location == nil {
			hi.Location = new(tailcfg.Location)
		}
		hi.Location.Priority = pri
		node.Hostinfo = hi.View()
	}
}
2024-05-31 23:21:55 -04:00
// withExitRoutes advertises the default routes, marking the peer as an
// exit node.
func withExitRoutes() peerOptFunc {
	return func(node *tailcfg.Node) {
		node.AllowedIPs = append(node.AllowedIPs, tsaddr.ExitRoutes()...)
	}
}
// withSuggest grants the peer the suggest-exit-node capability.
func withSuggest() peerOptFunc {
	return func(node *tailcfg.Node) {
		mak.Set(&node.CapMap, tailcfg.NodeAttrSuggestExitNode, []tailcfg.RawMessage{})
	}
}
2024-06-28 23:17:31 -04:00
// withCap sets the peer's capability version.
func withCap(version tailcfg.CapabilityVersion) peerOptFunc {
	return func(node *tailcfg.Node) { node.Cap = version }
}
2024-07-12 11:06:07 -04:00
// withOnline marks the peer as online or offline.
func withOnline(isOnline bool) peerOptFunc {
	return func(node *tailcfg.Node) { node.Online = &isOnline }
}
// withNodeKey replaces the peer's node key with a freshly generated one,
// instead of the deterministic key derived from the node ID.
func withNodeKey() peerOptFunc {
	return func(node *tailcfg.Node) { node.Key = key.NewNode().Public() }
}
cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection
With this change, policy enforcement and exit node resolution can happen in separate steps,
since enforcement no longer depends on resolving the suggested exit node. This keeps policy
enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution
to be asynchronous on netmap updates, link changes, etc.
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode
after a manual exit node override, which is necessary for tailscale/corp#29969.
Updates tailscale/corp#29969
Updates #16459
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-07-03 12:21:29 -05:00
func withAddresses ( addresses ... netip . Prefix ) peerOptFunc {
return func ( n * tailcfg . Node ) {
n . Addresses = append ( n . Addresses , addresses ... )
}
}
2024-06-03 16:12:12 -04:00
// deterministicRegionForTest returns a selectRegionFunc that always picks
// use, making the otherwise-random region choice deterministic for tests.
// The returned func also asserts that the candidate set it is offered
// equals want (order-insensitive). The test fails up front if use is not
// itself a member of want.
func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) selectRegionFunc {
	t.Helper()
	if !views.SliceContains(want, use) {
		t.Errorf("invalid test: use %v is not in want %v", use, want)
	}
	return func(got views.Slice[int]) int {
		if !views.SliceEqualAnyOrder(got, want) {
			t.Errorf("candidate regions = %v, want %v", got, want)
		}
		return use
	}
}
2024-05-31 09:54:46 -04:00
// deterministicNodeForTest returns a selectNodeFunc that always picks the
// candidate whose StableID is use, making the otherwise-random node choice
// deterministic for tests. The returned func asserts that the candidate
// set equals want (order-insensitive) and that the previously selected
// node passed in is wantLast; it fails the test if use is absent from the
// candidates.
func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeID], wantLast tailcfg.StableNodeID, use tailcfg.StableNodeID) selectNodeFunc {
	t.Helper()
	if !views.SliceContains(want, use) {
		t.Errorf("invalid test: use %v is not in want %v", use, want)
	}
	return func(got views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView {
		var ret tailcfg.NodeView
		// Collect the candidates' stable IDs, remembering the one to return.
		gotIDs := make([]tailcfg.StableNodeID, got.Len())
		for i, nv := range got.All() {
			if !nv.Valid() {
				t.Fatalf("invalid node at index %v", i)
			}
			gotIDs[i] = nv.StableID()
			if nv.StableID() == use {
				ret = nv
			}
		}
		if !views.SliceEqualAnyOrder(views.SliceOf(gotIDs), want) {
			t.Errorf("candidate nodes = %v, want %v", gotIDs, want)
		}
		if last != wantLast {
			t.Errorf("last node = %v, want %v", last, wantLast)
		}
		if !ret.Valid() {
			t.Fatalf("did not find matching node in %v, want %v", gotIDs, use)
		}
		return ret
	}
}
2024-04-15 18:14:20 -04:00
// TestSuggestExitNode exercises suggestExitNode's selection logic across
// DERP-based exit nodes, location-aware (Mullvad-style) exit nodes,
// stickiness to the previous suggestion, policy allowlists, and the error
// paths for missing netmap/DERP-map/report data.
func TestSuggestExitNode(t *testing.T) {
	t.Parallel()
	// Three DERP regions; only region 1 has coordinates (near Dallas).
	defaultDERPMap := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				Latitude:  32,
				Longitude: -97,
			},
			2: {},
			3: {},
		},
	}
	// Region 1 is fastest and preferred; 2 and 3 are progressively slower.
	preferred1Report := &netcheck.Report{
		RegionLatency: map[int]time.Duration{
			1: 10 * time.Millisecond,
			2: 20 * time.Millisecond,
			3: 30 * time.Millisecond,
		},
		PreferredDERP: 1,
	}
	// All latencies are zero (unknown), but region 1 is still preferred.
	noLatency1Report := &netcheck.Report{
		RegionLatency: map[int]time.Duration{
			1: 0,
			2: 0,
			3: 0,
		},
		PreferredDERP: 1,
	}
	// Latencies are known but no preferred DERP region was determined.
	preferredNoneReport := &netcheck.Report{
		RegionLatency: map[int]time.Duration{
			1: 10 * time.Millisecond,
			2: 20 * time.Millisecond,
			3: 30 * time.Millisecond,
		},
		PreferredDERP: 0,
	}
	dallas := tailcfg.Location{
		Latitude:  32.779167,
		Longitude: -96.808889,
		Priority:  100,
	}
	sanJose := tailcfg.Location{
		Latitude:  37.3382082,
		Longitude: -121.8863286,
		Priority:  20,
	}
	fortWorth := tailcfg.Location{
		Latitude:  32.756389,
		Longitude: -97.3325,
		Priority:  150,
	}
	// Same coordinates as fortWorth but with the same priority as dallas.
	fortWorthLowPriority := tailcfg.Location{
		Latitude:  32.756389,
		Longitude: -97.3325,
		Priority:  100,
	}
	// DERP-based exit nodes; makePeer defaults HomeDERP to the node ID
	// unless overridden.
	peer1 := makePeer(1,
		withExitRoutes(),
		withSuggest())
	peer2DERP1 := makePeer(2,
		withDERP(1),
		withExitRoutes(),
		withSuggest())
	peer3 := makePeer(3,
		withExitRoutes(),
		withSuggest())
	peer4DERP3 := makePeer(4,
		withDERP(3),
		withExitRoutes(),
		withSuggest())
	// Location-based (Mullvad-style) exit nodes: no home DERP, selected by
	// geographic distance and priority instead.
	dallasPeer5 := makePeer(5,
		withName("Dallas"),
		withoutDERP(),
		withExitRoutes(),
		withSuggest(),
		withLocation(dallas.View()))
	sanJosePeer6 := makePeer(6,
		withName("San Jose"),
		withoutDERP(),
		withExitRoutes(),
		withSuggest(),
		withLocation(sanJose.View()))
	fortWorthPeer7 := makePeer(7,
		withName("Fort Worth"),
		withoutDERP(),
		withExitRoutes(),
		withSuggest(),
		withLocation(fortWorth.View()))
	fortWorthPeer8LowPriority := makePeer(8,
		withName("Fort Worth Low"),
		withoutDERP(),
		withExitRoutes(),
		withSuggest(),
		withLocation(fortWorthLowPriority.View()))
	selfNode := tailcfg.Node{
		Addresses: []netip.Prefix{
			netip.MustParsePrefix("100.64.1.1/32"),
			netip.MustParsePrefix("fe70::1/128"),
		},
	}
	defaultNetmap := &netmap.NetworkMap{
		SelfNode: selfNode.View(),
		DERPMap:  defaultDERPMap,
		Peers: []tailcfg.NodeView{
			peer2DERP1,
			peer3,
		},
	}
	locationNetmap := &netmap.NetworkMap{
		SelfNode: selfNode.View(),
		DERPMap:  defaultDERPMap,
		Peers: []tailcfg.NodeView{
			dallasPeer5,
			sanJosePeer6,
		},
	}
	largeNetmap := &netmap.NetworkMap{
		SelfNode: selfNode.View(),
		DERPMap:  defaultDERPMap,
		Peers: []tailcfg.NodeView{
			peer1,
			peer2DERP1,
			peer3,
			peer4DERP3,
			dallasPeer5,
			sanJosePeer6,
			fortWorthPeer7,
		},
	}

	tests := []struct {
		name           string
		lastReport     *netcheck.Report
		netMap         *netmap.NetworkMap
		lastSuggestion tailcfg.StableNodeID
		allowPolicy    []tailcfg.StableNodeID // nil means no allowlist policy; empty means allow nothing

		// Expected candidates offered to the deterministic select funcs.
		wantRegions []int
		useRegion   int
		wantNodes   []tailcfg.StableNodeID

		wantID       tailcfg.StableNodeID
		wantName     string
		wantLocation tailcfg.LocationView

		wantError error
	}{
		{
			name:       "2 exit nodes in same region",
			lastReport: preferred1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					peer1,
					peer2DERP1,
				},
			},
			wantNodes: []tailcfg.StableNodeID{
				"stable1",
				"stable2",
			},
			wantName: "peer1",
			wantID:   "stable1",
		},
		{
			name:        "2 exit nodes different regions unknown latency",
			lastReport:  noLatency1Report,
			netMap:      defaultNetmap,
			wantRegions: []int{1, 3}, // the only regions with peers
			useRegion:   1,
			wantName:    "peer2",
			wantID:      "stable2",
		},
		{
			name: "2 derp based exit nodes, different regions, equal latency",
			lastReport: &netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10,
					2: 20,
					3: 10,
				},
				PreferredDERP: 1,
			},
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					peer1,
					peer3,
				},
			},
			wantRegions: []int{1, 2},
			useRegion:   1,
			wantName:    "peer1",
			wantID:      "stable1",
		},
		{
			name:         "mullvad nodes, no derp based exit nodes",
			lastReport:   noLatency1Report,
			netMap:       locationNetmap,
			wantID:       "stable5",
			wantLocation: dallas.View(),
			wantName:     "Dallas",
		},
		{
			name:       "nearby mullvad nodes with different priorities",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
					sanJosePeer6,
					fortWorthPeer7,
				},
			},
			wantID:       "stable7",
			wantLocation: fortWorth.View(),
			wantName:     "Fort Worth",
		},
		{
			name:       "nearby mullvad nodes with same priorities",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
					sanJosePeer6,
					fortWorthPeer8LowPriority,
				},
			},
			wantNodes:    []tailcfg.StableNodeID{"stable5", "stable8"},
			wantID:       "stable5",
			wantLocation: dallas.View(),
			wantName:     "Dallas",
		},
		{
			name:       "mullvad nodes, remaining node is not in preferred derp",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
					sanJosePeer6,
					peer4DERP3,
				},
			},
			useRegion: 3,
			wantID:    "stable4",
			wantName:  "peer4",
		},
		{
			name:       "no peers",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
			},
		},
		{
			name:       "nil report",
			lastReport: nil,
			netMap:     largeNetmap,
			wantError:  ErrNoPreferredDERP,
		},
		{
			name:       "no preferred derp region",
			lastReport: preferredNoneReport,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
			},
			wantError: ErrNoPreferredDERP,
		},
		{
			name:       "nil netmap",
			lastReport: noLatency1Report,
			netMap:     nil,
			wantError:  ErrNoPreferredDERP,
		},
		{
			name:       "nil derpmap",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  nil,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
				},
			},
			wantError: ErrNoPreferredDERP,
		},
		{
			// Exit nodes without NodeAttrSuggestExitNode are never suggested.
			name:       "missing suggestion capability",
			lastReport: noLatency1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					makePeer(1, withExitRoutes()),
					makePeer(2, withLocation(dallas.View()), withExitRoutes()),
				},
			},
		},
		{
			name:       "prefer last node",
			lastReport: preferred1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					peer1,
					peer2DERP1,
				},
			},
			lastSuggestion: "stable2",
			wantNodes: []tailcfg.StableNodeID{
				"stable1",
				"stable2",
			},
			wantName: "peer2",
			wantID:   "stable2",
		},
		{
			name:           "found better derp node",
			lastSuggestion: "stable3",
			lastReport:     preferred1Report,
			netMap:         defaultNetmap,
			wantID:         "stable2",
			wantName:       "peer2",
		},
		{
			name:           "prefer last mullvad node",
			lastSuggestion: "stable2",
			lastReport:     preferred1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
					sanJosePeer6,
					fortWorthPeer8LowPriority,
				},
			},
			wantNodes:    []tailcfg.StableNodeID{"stable5", "stable8"},
			wantID:       "stable5",
			wantName:     "Dallas",
			wantLocation: dallas.View(),
		},
		{
			name:           "prefer better mullvad node",
			lastSuggestion: "stable2",
			lastReport:     preferred1Report,
			netMap: &netmap.NetworkMap{
				SelfNode: selfNode.View(),
				DERPMap:  defaultDERPMap,
				Peers: []tailcfg.NodeView{
					dallasPeer5,
					sanJosePeer6,
					fortWorthPeer7,
				},
			},
			wantNodes:    []tailcfg.StableNodeID{"stable7"},
			wantID:       "stable7",
			wantName:     "Fort Worth",
			wantLocation: fortWorth.View(),
		},
		{
			name:       "large netmap",
			lastReport: preferred1Report,
			netMap:     largeNetmap,
			wantNodes:  []tailcfg.StableNodeID{"stable1", "stable2"},
			wantID:     "stable2",
			wantName:   "peer2",
		},
		{
			name:        "no allowed suggestions",
			lastReport:  preferred1Report,
			netMap:      largeNetmap,
			allowPolicy: []tailcfg.StableNodeID{},
		},
		{
			name:        "only derp suggestions",
			lastReport:  preferred1Report,
			netMap:      largeNetmap,
			allowPolicy: []tailcfg.StableNodeID{"stable1", "stable2", "stable3"},
			wantNodes:   []tailcfg.StableNodeID{"stable1", "stable2"},
			wantID:      "stable2",
			wantName:    "peer2",
		},
		{
			name:         "only mullvad suggestions",
			lastReport:   preferred1Report,
			netMap:       largeNetmap,
			allowPolicy:  []tailcfg.StableNodeID{"stable5", "stable6", "stable7"},
			wantID:       "stable7",
			wantName:     "Fort Worth",
			wantLocation: fortWorth.View(),
		},
		{
			name:        "only worst derp",
			lastReport:  preferred1Report,
			netMap:      largeNetmap,
			allowPolicy: []tailcfg.StableNodeID{"stable3"},
			wantID:      "stable3",
			wantName:    "peer3",
		},
		{
			name:         "only worst mullvad",
			lastReport:   preferred1Report,
			netMap:       largeNetmap,
			allowPolicy:  []tailcfg.StableNodeID{"stable6"},
			wantID:       "stable6",
			wantName:     "San Jose",
			wantLocation: sanJose.View(),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Default the expected candidate sets to the single expected
			// answer when the test case doesn't spell them out.
			wantRegions := tt.wantRegions
			if wantRegions == nil {
				wantRegions = []int{tt.useRegion}
			}
			selectRegion := deterministicRegionForTest(t, views.SliceOf(wantRegions), tt.useRegion)
			wantNodes := tt.wantNodes
			if wantNodes == nil {
				wantNodes = []tailcfg.StableNodeID{tt.wantID}
			}
			selectNode := deterministicNodeForTest(t, views.SliceOf(wantNodes), tt.lastSuggestion, tt.wantID)
			var allowList set.Set[tailcfg.StableNodeID]
			if tt.allowPolicy != nil {
				allowList = set.SetOf(tt.allowPolicy)
			}
			nb := newNodeBackend(t.Context(), eventbus.New())
			defer nb.shutdown(errShutdown)
			nb.SetNetMap(tt.netMap)
			got, err := suggestExitNode(tt.lastReport, nb, tt.lastSuggestion, selectRegion, selectNode, allowList)
			if got.Name != tt.wantName {
				t.Errorf("name=%v, want %v", got.Name, tt.wantName)
			}
			if got.ID != tt.wantID {
				t.Errorf("ID=%v, want %v", got.ID, tt.wantID)
			}
			if tt.wantError == nil && err != nil {
				t.Errorf("err=%v, want no error", err)
			}
			if tt.wantError != nil && !errors.Is(err, tt.wantError) {
				t.Errorf("err=%v, want %v", err, tt.wantError)
			}
			if !reflect.DeepEqual(got.Location, tt.wantLocation) {
				t.Errorf("location=%v, want %v", got.Location, tt.wantLocation)
			}
		})
	}
}
// TestSuggestExitNodePickWeighted checks that pickWeighted keeps only the
// candidates with the highest location priority: a strictly higher
// priority wins alone, ties are all kept, and an empty input yields an
// empty result.
func TestSuggestExitNodePickWeighted(t *testing.T) {
	location10 := tailcfg.Location{
		Priority: 10,
	}
	location20 := tailcfg.Location{
		Priority: 20,
	}
	tests := []struct {
		name       string
		candidates []tailcfg.NodeView
		wantIDs    []tailcfg.StableNodeID
	}{
		{
			name: "different priorities",
			candidates: []tailcfg.NodeView{
				makePeer(2, withExitRoutes(), withLocation(location20.View())),
				makePeer(3, withExitRoutes(), withLocation(location10.View())),
			},
			wantIDs: []tailcfg.StableNodeID{"stable2"},
		},
		{
			name: "same priorities",
			candidates: []tailcfg.NodeView{
				makePeer(2, withExitRoutes(), withLocation(location10.View())),
				makePeer(3, withExitRoutes(), withLocation(location10.View())),
			},
			wantIDs: []tailcfg.StableNodeID{"stable2", "stable3"},
		},
		{
			name:       "<1 candidates",
			candidates: []tailcfg.NodeView{},
		},
		{
			name: "1 candidate",
			candidates: []tailcfg.NodeView{
				makePeer(2, withExitRoutes(), withLocation(location20.View())),
			},
			wantIDs: []tailcfg.StableNodeID{"stable2"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := pickWeighted(tt.candidates)
			gotIDs := make([]tailcfg.StableNodeID, 0, len(got))
			for _, n := range got {
				if !n.Valid() {
					gotIDs = append(gotIDs, "<invalid>")
					continue
				}
				gotIDs = append(gotIDs, n.StableID())
			}
			// Result order is not significant; compare as sets.
			if !views.SliceEqualAnyOrder(views.SliceOf(gotIDs), views.SliceOf(tt.wantIDs)) {
				t.Errorf("node IDs = %v, want %v", gotIDs, tt.wantIDs)
			}
		})
	}
}
// TestSuggestExitNodeLongLatDistance checks longLatDistance against
// reference distances computed with the more precise WGS84 model. Because
// longLatDistance uses a spherical approximation for simplicity, results
// only need to land within 10km of the reference values.
func TestSuggestExitNodeLongLatDistance(t *testing.T) {
	const maxError = 10000 // allowed approximation error, in meters (10km)
	for _, tc := range []struct {
		name              string
		fromLat, fromLong float64
		toLat, toLong     float64
		want              float64
	}{
		{
			name: "zero values",
			want: 0,
		},
		{
			name:     "valid values",
			fromLat:  40.73061,
			fromLong: -73.935242,
			toLat:    37.3382082,
			toLong:   -121.8863286,
			want:     4117266.873301274,
		},
		{
			name:     "valid values, locations in north and south of equator",
			fromLat:  40.73061,
			fromLong: -73.935242,
			toLat:    -33.861481,
			toLong:   151.205475,
			want:     15994089.144368416,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			got := longLatDistance(tc.fromLat, tc.fromLong, tc.toLat, tc.toLong)
			if math.Abs(got-tc.want) > maxError {
				t.Errorf("distance=%vm, want within %vm of %vm", got, maxError, tc.want)
			}
		})
	}
}
2025-07-10 22:15:55 -07:00
func TestSuggestExitNodeTrafficSteering ( t * testing . T ) {
city := & tailcfg . Location {
Country : "Canada" ,
CountryCode : "CA" ,
City : "Montreal" ,
CityCode : "MTR" ,
Latitude : 45.5053 ,
Longitude : - 73.5525 ,
}
noLatLng := & tailcfg . Location {
Country : "Canada" ,
CountryCode : "CA" ,
City : "Montreal" ,
CityCode : "MTR" ,
}
selfNode := tailcfg . Node {
ID : 0 , // randomness is seeded off NetMap.SelfNode.ID
Addresses : [ ] netip . Prefix {
netip . MustParsePrefix ( "100.64.1.1/32" ) ,
netip . MustParsePrefix ( "fe70::1/128" ) ,
} ,
CapMap : tailcfg . NodeCapMap {
tailcfg . NodeAttrTrafficSteering : [ ] tailcfg . RawMessage { } ,
} ,
}
for _ , tt := range [ ] struct {
name string
netMap * netmap . NetworkMap
lastExit tailcfg . StableNodeID
allowPolicy [ ] tailcfg . StableNodeID
wantID tailcfg . StableNodeID
wantName string
wantLoc * tailcfg . Location
wantPri int
wantErr error
} {
{
name : "no-netmap" ,
netMap : nil ,
wantErr : ErrNoNetMap ,
} ,
{
name : "no-nodes" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView { } ,
} ,
wantID : "" ,
} ,
{
name : "no-exit-nodes" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ) ,
} ,
} ,
wantID : "" ,
} ,
{
name : "exit-node-without-suggestion" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ) ,
} ,
} ,
wantID : "" ,
} ,
{
name : "suggested-exit-node-without-routes" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withSuggest ( ) ) ,
} ,
} ,
wantID : "" ,
} ,
{
name : "suggested-exit-node" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
} ,
{
ipn/ipnlocal: use rendezvous hashing to traffic-steer exit nodes
With auto exit nodes enabled, the client picks exit nodes from the
ones advertised in the network map. Usually, it picks the one with the
highest priority score, but when the top spot is tied, it used to pick
randomly. Then, once it made a selection, it would strongly prefer to
stick with that exit node. It wouldn’t even consider another exit node
unless the client was shutdown or the exit node went offline. This is
to prevent flapping, where a client constantly chooses a different
random exit node.
The major problem with this algorithm is that new exit nodes don’t get
selected as often as they should. In fact, they wouldn’t even move
over if a higher scoring exit node appeared.
Let’s say that you have an exit node and it’s overloaded. So you spin
up a new exit node, right beside your existing one, in the hopes that
the traffic will be split across them. But since the client had this
strong affinity, they stick with the exit node they know and love.
Using rendezvous hashing, we can have different clients spread
their selections equally across their top scoring exit nodes. When an
exit node shuts down, its clients will spread themselves evenly to
their other equal options. When an exit node starts, a proportional
number of clients will migrate to their new best option.
Read more: https://en.wikipedia.org/wiki/Rendezvous_hashing
The trade-off is that starting up a new exit node may cause some
clients to move over, interrupting their existing network connections.
So this change is only enabled for tailnets with `traffic-steering`
enabled.
Updates tailscale/corp#29966
Fixes #16551
Signed-off-by: Simon Law <sfllaw@tailscale.com>
2025-07-16 11:50:13 -07:00
name : "suggest-exit-node-stable-pick" ,
2025-07-10 22:15:55 -07:00
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
makePeer ( 3 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
makePeer ( 4 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
} ,
} ,
ipn/ipnlocal: use rendezvous hashing to traffic-steer exit nodes
With auto exit nodes enabled, the client picks exit nodes from the
ones advertised in the network map. Usually, it picks the one with the
highest priority score, but when the top spot is tied, it used to pick
randomly. Then, once it made a selection, it would strongly prefer to
stick with that exit node. It wouldn’t even consider another exit node
unless the client was shutdown or the exit node went offline. This is
to prevent flapping, where a client constantly chooses a different
random exit node.
The major problem with this algorithm is that new exit nodes don’t get
selected as often as they should. In fact, they wouldn’t even move
over if a higher scoring exit node appeared.
Let’s say that you have an exit node and it’s overloaded. So you spin
up a new exit node, right beside your existing one, in the hopes that
the traffic will be split across them. But since the client had this
strong affinity, they stick with the exit node they know and love.
Using rendezvous hashing, we can have different clients spread
their selections equally across their top scoring exit nodes. When an
exit node shuts down, its clients will spread themselves evenly to
their other equal options. When an exit node starts, a proportional
number of clients will migrate to their new best option.
Read more: https://en.wikipedia.org/wiki/Rendezvous_hashing
The trade-off is that starting up a new exit node may cause some
clients to move over, interrupting their existing network connections.
So this change is only enabled for tailnets with `traffic-steering`
enabled.
Updates tailscale/corp#29966
Fixes #16551
Signed-off-by: Simon Law <sfllaw@tailscale.com>
2025-07-16 11:50:13 -07:00
// Change this, if the hashing function changes.
2025-07-10 22:15:55 -07:00
wantID : "stable3" ,
wantName : "peer3" ,
} ,
{
name : "exit-nodes-with-and-without-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 1 ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantPri : 1 ,
} ,
{
name : "exit-nodes-without-and-with-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 1 ) ) ,
} ,
} ,
wantID : "stable2" ,
wantName : "peer2" ,
wantPri : 1 ,
} ,
{
name : "exit-nodes-with-negative-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 1 ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 2 ) ) ,
makePeer ( 3 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 3 ) ) ,
makePeer ( 4 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 4 ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantPri : - 1 ,
} ,
{
name : "exit-nodes-no-priority-beats-negative-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 1 ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( - 2 ) ) ,
makePeer ( 3 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
} ,
} ,
wantID : "stable3" ,
wantName : "peer3" ,
} ,
{
name : "exit-nodes-same-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 1 ) ) ,
makePeer ( 2 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 2 ) ) , // top
makePeer ( 3 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 1 ) ) ,
makePeer ( 4 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 2 ) ) , // top
makePeer ( 5 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 2 ) ) , // top
makePeer ( 6 ,
withExitRoutes ( ) ,
withSuggest ( ) ) ,
makePeer ( 7 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocationPriority ( 2 ) ) , // top
} ,
} ,
wantID : "stable5" ,
wantName : "peer5" ,
wantPri : 2 ,
} ,
{
name : "suggested-exit-node-with-city" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocation ( city . View ( ) ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantLoc : city ,
} ,
{
name : "suggested-exit-node-with-city-and-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocation ( city . View ( ) ) ,
withLocationPriority ( 1 ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantLoc : city ,
wantPri : 1 ,
} ,
{
name : "suggested-exit-node-without-latlng" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocation ( noLatLng . View ( ) ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantLoc : noLatLng ,
} ,
{
name : "suggested-exit-node-without-latlng-with-priority" ,
netMap : & netmap . NetworkMap {
SelfNode : selfNode . View ( ) ,
Peers : [ ] tailcfg . NodeView {
makePeer ( 1 ,
withExitRoutes ( ) ,
withSuggest ( ) ,
withLocation ( noLatLng . View ( ) ) ,
withLocationPriority ( 1 ) ) ,
} ,
} ,
wantID : "stable1" ,
wantName : "peer1" ,
wantLoc : noLatLng ,
wantPri : 1 ,
} ,
} {
t . Run ( tt . name , func ( t * testing . T ) {
var allowList set . Set [ tailcfg . StableNodeID ]
if tt . allowPolicy != nil {
allowList = set . SetOf ( tt . allowPolicy )
}
// HACK: NetMap.AllCaps is populated by Control:
if tt . netMap != nil {
caps := maps . Keys ( tt . netMap . SelfNode . CapMap ( ) . AsMap ( ) )
tt . netMap . AllCaps = set . SetOf ( slices . Collect ( caps ) )
}
nb := newNodeBackend ( t . Context ( ) , eventbus . New ( ) )
defer nb . shutdown ( errShutdown )
nb . SetNetMap ( tt . netMap )
ipn/ipnlocal: use rendezvous hashing to traffic-steer exit nodes
With auto exit nodes enabled, the client picks exit nodes from the
ones advertised in the network map. Usually, it picks the one with the
highest priority score, but when the top spot is tied, it used to pick
randomly. Then, once it made a selection, it would strongly prefer to
stick with that exit node. It wouldn’t even consider another exit node
unless the client was shutdown or the exit node went offline. This is
to prevent flapping, where a client constantly chooses a different
random exit node.
The major problem with this algorithm is that new exit nodes don’t get
selected as often as they should. In fact, they wouldn’t even move
over if a higher scoring exit node appeared.
Let’s say that you have an exit node and it’s overloaded. So you spin
up a new exit node, right beside your existing one, in the hopes that
the traffic will be split across them. But since the client had this
strong affinity, they stick with the exit node they know and love.
Using rendezvous hashing, we can have different clients spread
their selections equally across their top scoring exit nodes. When an
exit node shuts down, its clients will spread themselves evenly to
their other equal options. When an exit node starts, a proportional
number of clients will migrate to their new best option.
Read more: https://en.wikipedia.org/wiki/Rendezvous_hashing
The trade-off is that starting up a new exit node may cause some
clients to move over, interrupting their existing network connections.
So this change is only enabled for tailnets with `traffic-steering`
enabled.
Updates tailscale/corp#29966
Fixes #16551
Signed-off-by: Simon Law <sfllaw@tailscale.com>
2025-07-16 11:50:13 -07:00
got , err := suggestExitNodeUsingTrafficSteering ( nb , allowList )
2025-07-10 22:15:55 -07:00
if tt . wantErr == nil && err != nil {
t . Fatalf ( "err=%v, want nil" , err )
}
if tt . wantErr != nil && ! errors . Is ( err , tt . wantErr ) {
t . Fatalf ( "err=%v, want %v" , err , tt . wantErr )
}
if got . Name != tt . wantName {
t . Errorf ( "name=%q, want %q" , got . Name , tt . wantName )
}
if got . ID != tt . wantID {
t . Errorf ( "ID=%q, want %q" , got . ID , tt . wantID )
}
wantLoc := tt . wantLoc
if tt . wantPri != 0 {
if wantLoc == nil {
wantLoc = new ( tailcfg . Location )
}
wantLoc . Priority = tt . wantPri
}
if diff := cmp . Diff ( got . Location . AsStruct ( ) , wantLoc ) ; diff != "" {
t . Errorf ( "location mismatch (+want -got)\n%s" , diff )
}
} )
}
}
2024-04-15 18:14:20 -04:00
// TestMinLatencyDERPregion exercises minLatencyDERPRegion with reports that
// have no latency data, distinct latencies, and fully tied latencies.
func TestMinLatencyDERPregion(t *testing.T) {
	testCases := []struct {
		name       string
		regions    []int
		report     *netcheck.Report
		wantRegion int
	}{
		{
			name:       "regions, no latency values",
			regions:    []int{1, 2, 3},
			wantRegion: 0,
			report:     &netcheck.Report{},
		},
		{
			name:       "regions, different latency values",
			regions:    []int{1, 2, 3},
			wantRegion: 2,
			report: &netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10 * time.Millisecond,
					2: 5 * time.Millisecond,
					3: 30 * time.Millisecond,
				},
			},
		},
		{
			name:       "regions, same values",
			regions:    []int{1, 2, 3},
			wantRegion: 1,
			report: &netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10 * time.Millisecond,
					2: 10 * time.Millisecond,
					3: 10 * time.Millisecond,
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if got := minLatencyDERPRegion(tc.regions, tc.report); got != tc.wantRegion {
				t.Errorf("got region %v want region %v", got, tc.wantRegion)
			}
		})
	}
}
2024-04-19 13:37:21 -07:00
// TestEnableAutoUpdates verifies that enabling the auto-update preference only
// succeeds on platforms that support it, while disabling always succeeds.
func TestEnableAutoUpdates(t *testing.T) {
	lb := newTestLocalBackend(t)

	// editApply sets the AutoUpdate.Apply preference to the given value.
	editApply := func(enabled bool) error {
		_, err := lb.EditPrefs(&ipn.MaskedPrefs{
			AutoUpdateSet: ipn.AutoUpdatePrefsMask{
				ApplySet: true,
			},
			Prefs: ipn.Prefs{
				AutoUpdate: ipn.AutoUpdatePrefs{
					Apply: opt.NewBool(enabled),
				},
			},
		})
		return err
	}

	err := editApply(true)
	// Enabling may fail, depending on which environment we are running this
	// test in.
	wantErr := !clientupdate.CanAutoUpdate()
	gotErr := err != nil
	if gotErr != wantErr {
		t.Fatalf("enabling auto-updates: got error: %v (%v); want error: %v", gotErr, err, wantErr)
	}
	// Disabling should always succeed.
	if err := editApply(false); err != nil {
		t.Fatalf("disabling auto-updates: got error: %v", err)
	}
}
2024-04-11 14:06:12 -07:00
func TestReadWriteRouteInfo ( t * testing . T ) {
// set up a backend with more than one profile
b := newTestBackend ( t )
prof1 := ipn . LoginProfile { ID : "id1" , Key : "key1" }
prof2 := ipn . LoginProfile { ID : "id2" , Key : "key2" }
2025-01-30 11:24:25 -06:00
b . pm . knownProfiles [ "id1" ] = prof1 . View ( )
b . pm . knownProfiles [ "id2" ] = prof2 . View ( )
b . pm . currentProfile = prof1 . View ( )
2024-04-11 14:06:12 -07:00
// set up routeInfo
ri1 := & appc . RouteInfo { }
ri1 . Wildcards = [ ] string { "1" }
ri2 := & appc . RouteInfo { }
ri2 . Wildcards = [ ] string { "2" }
// read before write
readRi , err := b . readRouteInfoLocked ( )
if readRi != nil {
t . Fatalf ( "read before writing: want nil, got %v" , readRi )
}
if err != ipn . ErrStateNotExist {
t . Fatalf ( "read before writing: want %v, got %v" , ipn . ErrStateNotExist , err )
}
// write the first routeInfo
if err := b . storeRouteInfo ( ri1 ) ; err != nil {
t . Fatal ( err )
}
// write the other routeInfo as the other profile
ipn, ipn/ipnlocal: reduce coupling between LocalBackend/profileManager and the Windows-specific "current user" model
Ultimately, we'd like to get rid of the concept of the "current user". It is only used on Windows,
but even then it doesn't work well in multi-user and enterprise/managed Windows environments.
In this PR, we update LocalBackend and profileManager to decouple them a bit more from this obsolete concept.
This is done in a preparation for extracting ipnlocal.Extension-related interfaces and types, and using them
to implement optional features like tailscale/corp#27645, instead of continuing growing the core ipnlocal logic.
Notably, we rename (*profileManager).SetCurrentUserAndProfile() to SwitchToProfile() and change its signature
to accept an ipn.LoginProfileView instead of an ipn.ProfileID and ipn.WindowsUserID. Since we're not removing
the "current user" completely just yet, the method sets the current user to the owner of the target profile.
We also update the profileResolver callback type, which is typically implemented by LocalBackend extensions,
to return an ipn.LoginProfileView instead of ipn.ProfileID and ipn.WindowsUserID.
Updates tailscale/corp#27645
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-04-05 22:15:26 -05:00
if _ , _ , err := b . pm . SwitchToProfileByID ( "id2" ) ; err != nil {
2024-04-11 14:06:12 -07:00
t . Fatal ( err )
}
if err := b . storeRouteInfo ( ri2 ) ; err != nil {
t . Fatal ( err )
}
// read the routeInfo of the first profile
ipn, ipn/ipnlocal: reduce coupling between LocalBackend/profileManager and the Windows-specific "current user" model
Ultimately, we'd like to get rid of the concept of the "current user". It is only used on Windows,
but even then it doesn't work well in multi-user and enterprise/managed Windows environments.
In this PR, we update LocalBackend and profileManager to decouple them a bit more from this obsolete concept.
This is done in a preparation for extracting ipnlocal.Extension-related interfaces and types, and using them
to implement optional features like tailscale/corp#27645, instead of continuing growing the core ipnlocal logic.
Notably, we rename (*profileManager).SetCurrentUserAndProfile() to SwitchToProfile() and change its signature
to accept an ipn.LoginProfileView instead of an ipn.ProfileID and ipn.WindowsUserID. Since we're not removing
the "current user" completely just yet, the method sets the current user to the owner of the target profile.
We also update the profileResolver callback type, which is typically implemented by LocalBackend extensions,
to return an ipn.LoginProfileView instead of ipn.ProfileID and ipn.WindowsUserID.
Updates tailscale/corp#27645
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-04-05 22:15:26 -05:00
if _ , _ , err := b . pm . SwitchToProfileByID ( "id1" ) ; err != nil {
2024-04-11 14:06:12 -07:00
t . Fatal ( err )
}
readRi , err = b . readRouteInfoLocked ( )
if err != nil {
t . Fatal ( err )
}
if ! slices . Equal ( readRi . Wildcards , ri1 . Wildcards ) {
t . Fatalf ( "read prof1 routeInfo wildcards: want %v, got %v" , ri1 . Wildcards , readRi . Wildcards )
}
// read the routeInfo of the second profile
ipn, ipn/ipnlocal: reduce coupling between LocalBackend/profileManager and the Windows-specific "current user" model
Ultimately, we'd like to get rid of the concept of the "current user". It is only used on Windows,
but even then it doesn't work well in multi-user and enterprise/managed Windows environments.
In this PR, we update LocalBackend and profileManager to decouple them a bit more from this obsolete concept.
This is done in a preparation for extracting ipnlocal.Extension-related interfaces and types, and using them
to implement optional features like tailscale/corp#27645, instead of continuing growing the core ipnlocal logic.
Notably, we rename (*profileManager).SetCurrentUserAndProfile() to SwitchToProfile() and change its signature
to accept an ipn.LoginProfileView instead of an ipn.ProfileID and ipn.WindowsUserID. Since we're not removing
the "current user" completely just yet, the method sets the current user to the owner of the target profile.
We also update the profileResolver callback type, which is typically implemented by LocalBackend extensions,
to return an ipn.LoginProfileView instead of ipn.ProfileID and ipn.WindowsUserID.
Updates tailscale/corp#27645
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2025-04-05 22:15:26 -05:00
if _ , _ , err := b . pm . SwitchToProfileByID ( "id2" ) ; err != nil {
2024-04-11 14:06:12 -07:00
t . Fatal ( err )
}
readRi , err = b . readRouteInfoLocked ( )
if err != nil {
t . Fatal ( err )
}
if ! slices . Equal ( readRi . Wildcards , ri2 . Wildcards ) {
t . Fatalf ( "read prof2 routeInfo wildcards: want %v, got %v" , ri2 . Wildcards , readRi . Wildcards )
}
}
2024-06-03 16:49:55 -04:00
func TestFillAllowedSuggestions ( t * testing . T ) {
tests := [ ] struct {
name string
allowPolicy [ ] string
want [ ] tailcfg . StableNodeID
} {
{
name : "unset" ,
} ,
{
name : "zero" ,
allowPolicy : [ ] string { } ,
want : [ ] tailcfg . StableNodeID { } ,
} ,
{
name : "one" ,
allowPolicy : [ ] string { "one" } ,
want : [ ] tailcfg . StableNodeID { "one" } ,
} ,
{
name : "many" ,
allowPolicy : [ ] string { "one" , "two" , "three" , "four" } ,
want : [ ] tailcfg . StableNodeID { "one" , "three" , "four" , "two" } , // order should not matter
} ,
{
name : "preserve case" ,
allowPolicy : [ ] string { "ABC" , "def" , "gHiJ" } ,
want : [ ] tailcfg . StableNodeID { "ABC" , "def" , "gHiJ" } ,
} ,
}
2024-10-08 10:50:14 -05:00
syspolicy . RegisterWellKnownSettingsForTest ( t )
2024-06-03 16:49:55 -04:00
for _ , tt := range tests {
t . Run ( tt . name , func ( t * testing . T ) {
2024-10-08 10:50:14 -05:00
policyStore := source . NewTestStoreOf ( t , source . TestSettingOf (
syspolicy . AllowedSuggestedExitNodes , tt . allowPolicy ,
) )
syspolicy . MustRegisterStoreForTest ( t , "TestStore" , setting . DeviceScope , policyStore )
2024-06-03 16:49:55 -04:00
got := fillAllowedSuggestions ( )
if got == nil {
if tt . want == nil {
return
}
t . Errorf ( "got nil, want %v" , tt . want )
}
if tt . want == nil {
t . Errorf ( "got %v, want nil" , got )
}
if ! got . Equal ( set . SetOf ( tt . want ) ) {
t . Errorf ( "got %v, want %v" , got , tt . want )
}
} )
}
}
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and update ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively, and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
// TestNotificationTargetMatch tests notificationTarget.match: a target may
// filter by user ID, by client ID, or by both, and the zero-value target
// (allClients) matches any actor, including a nil one.
func TestNotificationTargetMatch(t *testing.T) {
	tests := []struct {
		name      string
		target    notificationTarget
		actor     ipnauth.Actor
		wantMatch bool
	}{
		{
			name:      "AllClients/Nil",
			target:    allClients,
			actor:     nil,
			wantMatch: true,
		},
		{
			name:      "AllClients/NoUID/NoCID",
			target:    allClients,
			actor:     &ipnauth.TestActor{},
			wantMatch: true,
		},
		{
			name:      "AllClients/WithUID/NoCID",
			target:    allClients,
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.NoClientID},
			wantMatch: true,
		},
		{
			name:      "AllClients/NoUID/WithCID",
			target:    allClients,
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "AllClients/WithUID/WithCID",
			target:    allClients,
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "FilterByUID/Nil",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     nil,
			wantMatch: false,
		},
		{
			name:      "FilterByUID/NoUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{},
			wantMatch: false,
		},
		{
			name:      "FilterByUID/NoUID/WithCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
			wantMatch: false,
		},
		{
			name:      "FilterByUID/SameUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
			wantMatch: true,
		},
		{
			name:      "FilterByUID/DifferentUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"},
			wantMatch: false,
		},
		{
			name:      "FilterByUID/SameUID/WithCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "FilterByUID/DifferentUID/WithCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4"},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: false,
		},
		{
			name:      "FilterByCID/Nil",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     nil,
			wantMatch: false,
		},
		{
			name:      "FilterByCID/NoUID/NoCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{},
			wantMatch: false,
		},
		{
			name:      "FilterByCID/NoUID/SameCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "FilterByCID/NoUID/DifferentCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")},
			wantMatch: false,
		},
		{
			name:      "FilterByCID/WithUID/NoCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
			wantMatch: false,
		},
		{
			name:      "FilterByCID/WithUID/SameCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "FilterByCID/WithUID/DifferentCID",
			target:    notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")},
			wantMatch: false,
		},
		{
			name: "FilterByUID+CID/Nil",
			// Fix: this case previously filtered by userID only, contradicting
			// its name and the rest of the FilterByUID+CID group.
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     nil,
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/NoUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/NoUID/SameCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/NoUID/DifferentCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/SameUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/SameUID/SameCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: true,
		},
		{
			name:      "FilterByUID+CID/SameUID/DifferentCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/DifferentUID/NoCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/DifferentUID/SameCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")},
			wantMatch: false,
		},
		{
			name:      "FilterByUID+CID/DifferentUID/DifferentCID",
			target:    notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
			actor:     &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("B")},
			wantMatch: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotMatch := tt.target.match(tt.actor)
			if gotMatch != tt.wantMatch {
				t.Errorf("match: got %v; want %v", gotMatch, tt.wantMatch)
			}
		})
	}
}
// newTestControlFn constructs the [controlclient.Client] a test LocalBackend
// should use; see [newLocalBackendWithTestControl].
type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client
// newLocalBackendWithTestControl returns a new LocalBackend backed by a fresh
// [tsd.System], using newControl to create its control client.
func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend {
	return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl)
}
func newLocalBackendWithSysAndTestControl ( t * testing . T , enableLogging bool , sys * tsd . System , newControl newTestControlFn ) * LocalBackend {
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
logf := logger . Discard
if enableLogging {
logf = tstest . WhileTestRunningLogger ( t )
}
2025-03-14 15:17:26 -05:00
if _ , hasStore := sys . StateStore . GetOK ( ) ; ! hasStore {
store := new ( mem . Store )
sys . Set ( store )
}
if _ , hasEngine := sys . Engine . GetOK ( ) ; ! hasEngine {
2025-03-19 10:47:25 -07:00
e , err := wgengine . NewFakeUserspaceEngine ( logf , sys . Set , sys . HealthTracker ( ) , sys . UserMetricsRegistry ( ) , sys . Bus . Get ( ) )
2025-03-14 15:17:26 -05:00
if err != nil {
t . Fatalf ( "NewFakeUserspaceEngine: %v" , err )
}
t . Cleanup ( e . Close )
sys . Set ( e )
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
}
b , err := NewLocalBackend ( logf , logid . PublicID { } , sys , 0 )
if err != nil {
t . Fatalf ( "NewLocalBackend: %v" , err )
}
2024-11-22 08:25:54 -06:00
t . Cleanup ( b . Shutdown )
ipn/{ipnauth,ipnlocal,ipnserver}: send the auth URL to the user who started interactive login
We add the ClientID() method to the ipnauth.Actor interface and updated ipnserver.actor to implement it.
This method returns a unique ID of the connected client if the actor represents one. It helps link a series
of interactions initiated by the client, such as when a notification needs to be sent back to a specific session,
rather than all active sessions, in response to a certain request.
We also add LocalBackend.WatchNotificationsAs and LocalBackend.StartLoginInteractiveAs methods,
which are like WatchNotifications and StartLoginInteractive but accept an additional parameter
specifying an ipnauth.Actor who initiates the operation. We store these actor identities in
watchSession.owner and LocalBackend.authActor, respectively,and implement LocalBackend.sendTo
and related helper methods to enable sending notifications to watchSessions associated with actors
(or, more broadly, identifiable recipients).
We then use the above to change who receives the BrowseToURL notifications:
- For user-initiated, interactive logins, the notification is delivered only to the user who initiated the
process. If the initiating actor represents a specific connected client, the URL notification is sent back
to the same LocalAPI client that called StartLoginInteractive. Otherwise, the notification is sent to all
clients connected as that user.
Currently, we only differentiate between users on Windows, as it is inherently a multi-user OS.
- In all other cases (e.g., node key expiration), we send the notification to all connected users.
Updates tailscale/corp#18342
Signed-off-by: Nick Khyl <nickk@tailscale.com>
2024-10-13 11:36:46 -05:00
b . DisablePortMapperForTest ( )
b . SetControlClientGetterForTesting ( func ( opts controlclient . Options ) ( controlclient . Client , error ) {
return newControl ( t , opts ) , nil
} )
return b
}
// notificationHandler is any function that can process (e.g., check) a notification.
// It returns whether the notification has been handled or should be passed to the next handler.
// The handler may be called from any goroutine, so it must avoid calling functions
// that are restricted to the goroutine running the test or benchmark function,
// such as [testing.common.FailNow] and [testing.common.Fatalf].
type notificationHandler func(testing.TB, ipnauth.Actor, *ipn.Notify) bool
// wantedNotification names a [notificationHandler] that processes a notification
// the test expects and wants to receive. The name is used to report notifications
// that haven't been received within the expected timeout.
type wantedNotification struct {
	name string               // label used when reporting a missing notification
	cond notificationHandler  // reports whether a received notification is this one
}
// notificationWatcher observes [LocalBackend] notifications as the specified actor,
// reporting missing but expected notifications using [testing.common.Error],
// and delegating the handling of unexpected notifications to the [notificationHandler]s.
type notificationWatcher struct {
	tb    testing.TB
	lb    *LocalBackend
	actor ipnauth.Actor // the identity under which notifications are watched

	mu          sync.Mutex // guards the fields below
	mask        ipn.NotifyWatchOpt
	want        []wantedNotification  // notifications we want to receive
	unexpected  []notificationHandler // funcs that are called to check any other notifications
	ctxCancel   context.CancelFunc    // cancels the outstanding [LocalBackend.WatchNotificationsAs] call
	got         []*ipn.Notify         // all notifications, both wanted and unexpected, we've received so far
	gotWanted   []*ipn.Notify         // only the expected notifications; holds nil for any notification that hasn't been received
	gotWantedCh chan struct{}         // closed when we have received the last wanted notification
	doneCh      chan struct{}         // closed when [LocalBackend.WatchNotificationsAs] returns
}
// newNotificationWatcher returns a [notificationWatcher] that can observe
// notifications from lb on behalf of the given actor once its watch method
// is called.
func newNotificationWatcher(tb testing.TB, lb *LocalBackend, actor ipnauth.Actor) *notificationWatcher {
	w := &notificationWatcher{
		tb:    tb,
		lb:    lb,
		actor: actor,
	}
	return w
}
// watch starts (or restarts) watching [LocalBackend] notifications on behalf
// of w.actor. mask selects which notifications to receive, wanted lists the
// notifications the test expects to arrive in order, and unexpected holds
// handlers consulted for any notification that is not the next wanted one.
// watch does not return until the watcher has been registered with the
// backend, so notifications generated afterwards are guaranteed to be seen.
func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNotification, unexpected ...notificationHandler) {
	w.tb.Helper()

	// Cancel any outstanding [LocalBackend.WatchNotificationsAs] calls.
	w.mu.Lock()
	ctxCancel := w.ctxCancel
	doneCh := w.doneCh
	w.mu.Unlock()
	if doneCh != nil {
		ctxCancel()
		<-doneCh
	}

	doneCh = make(chan struct{})
	gotWantedCh := make(chan struct{})
	ctx, ctxCancel := context.WithCancel(context.Background())
	// Ensure the watcher goroutine is stopped and drained when the test ends.
	w.tb.Cleanup(func() {
		ctxCancel()
		<-doneCh
	})

	w.mu.Lock()
	w.mask = mask
	w.want = wanted
	w.unexpected = unexpected
	w.ctxCancel = ctxCancel
	w.got = nil
	w.gotWanted = make([]*ipn.Notify, len(wanted))
	w.gotWantedCh = gotWantedCh
	w.doneCh = doneCh
	w.mu.Unlock()

	watchAddedCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		if len(wanted) == 0 {
			// Nothing is wanted, so the "all wanted received" signal fires
			// immediately. If there are no unexpected-notification handlers
			// either, there is nothing to watch at all.
			close(gotWantedCh)
			if len(unexpected) == 0 {
				close(watchAddedCh)
				return
			}
		}

		// nextWantIdx tracks the next wanted notification; wanted
		// notifications must arrive in order.
		var nextWantIdx int
		w.lb.WatchNotificationsAs(ctx, w.actor, w.mask, func() { close(watchAddedCh) }, func(notify *ipn.Notify) (keepGoing bool) {
			w.tb.Helper()

			w.mu.Lock()
			defer w.mu.Unlock()
			w.got = append(w.got, notify)

			wanted := false
			for i := nextWantIdx; i < len(w.want); i++ {
				if wanted = w.want[i].cond(w.tb, w.actor, notify); wanted {
					w.gotWanted[i] = notify
					nextWantIdx = i + 1
					break
				}
			}

			if wanted && nextWantIdx == len(w.want) {
				close(w.gotWantedCh)
				if len(w.unexpected) == 0 {
					// If we have received the last wanted notification,
					// and we don't have any handlers for the unexpected notifications,
					// we can stop the watcher right away.
					return false
				}
			}

			if !wanted {
				// If we've received a notification we didn't expect,
				// it could either be an unwanted notification caused by a bug
				// or just a miscellaneous one that's irrelevant for the current test.
				// Call unexpected notification handlers, if any, to
				// check and fail the test if necessary.
				for _, h := range w.unexpected {
					if h(w.tb, w.actor, notify) {
						break
					}
				}
			}

			return true
		})
	}()
	// Block until the backend has registered the watcher.
	<-watchAddedCh
}
// check waits for all wanted notifications to be received (for up to 10
// seconds), then stops the watcher and reports any missing wanted
// notifications via [testing.common.Errorf]. It returns the received wanted
// notifications, or nil if any were missing.
func (w *notificationWatcher) check() []*ipn.Notify {
	w.tb.Helper()

	w.mu.Lock()
	cancel := w.ctxCancel
	gotWantedCh := w.gotWantedCh
	checkUnexpected := len(w.unexpected) != 0
	doneCh := w.doneCh
	w.mu.Unlock()

	// Wait for up to 10 seconds to receive expected notifications.
	timeout := 10 * time.Second
	for {
		select {
		case <-gotWantedCh:
			if checkUnexpected {
				// Disable this case (a receive on a nil channel blocks forever)
				// and keep watching briefly for unexpected notifications.
				gotWantedCh = nil
				// But do not wait longer than 500ms for unexpected notifications after
				// the expected notifications have been received.
				timeout = 500 * time.Millisecond
				continue
			}
		case <-doneCh:
			// [LocalBackend.WatchNotificationsAs] has already returned, so no further
			// notifications will be received. There's no reason to wait any longer.
		case <-time.After(timeout):
		}
		cancel()
		<-doneCh
		break
	}

	// Report missing notifications, if any, and log all received notifications,
	// including both expected and unexpected ones.
	w.mu.Lock()
	defer w.mu.Unlock()
	if hasMissing := slices.Contains(w.gotWanted, nil); hasMissing {
		want := make([]string, len(w.want))
		got := make([]string, 0, len(w.want))
		for i, wn := range w.want {
			want[i] = wn.name
			if w.gotWanted[i] != nil {
				got = append(got, wn.name)
			}
		}
		w.tb.Errorf("Notifications(%s): got %q; want %q", actorDescriptionForTest(w.actor), strings.Join(got, ", "), strings.Join(want, ", "))
		for i, n := range w.got {
			w.tb.Logf("%d. %v", i, n)
		}
		return nil
	}
	return w.gotWanted
}
// actorDescriptionForTest returns a human-readable description of the actor
// (username, user ID, and client ID, whichever are present) for use in test
// failure messages.
func actorDescriptionForTest(actor ipnauth.Actor) string {
	var attrs []string
	if actor != nil {
		name, _ := actor.Username()
		if name != "" {
			attrs = append(attrs, name)
		}
		if uid := actor.UserID(); uid != "" {
			attrs = append(attrs, string(uid))
		}
		cid, _ := actor.ClientID()
		if cid != ipnauth.NoClientID {
			attrs = append(attrs, cid.String())
		}
	}
	return fmt.Sprintf("Actor{%s}", strings.Join(attrs, ", "))
}
// TestLoginNotifications verifies that the BrowseToURL notification produced
// by an interactive login is delivered only to the intended watchers: sessions
// matching the initiating actor's user ID (and client ID, when one is set),
// and not to other users or other client sessions.
func TestLoginNotifications(t *testing.T) {
	const (
		enableLogging = true
		controlURL    = "https://localhost:1/"
		loginURL      = "https://localhost:1/1"
	)

	// wantBrowseToURL matches a BrowseToURL notification and also verifies
	// that the URL it carries is the expected login URL.
	wantBrowseToURL := wantedNotification{
		name: "BrowseToURL",
		cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
			if n.BrowseToURL != nil && *n.BrowseToURL != loginURL {
				t.Errorf("BrowseToURL (%s): got %q; want %q", actorDescriptionForTest(actor), *n.BrowseToURL, loginURL)
				return false
			}
			return n.BrowseToURL != nil
		},
	}
	// unexpectedBrowseToURL fails the test if a watcher that should not see
	// the login URL receives a BrowseToURL notification.
	unexpectedBrowseToURL := func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
		if n.BrowseToURL != nil {
			t.Errorf("Unexpected BrowseToURL(%s): %v", actorDescriptionForTest(actor), n)
			return true
		}
		return false
	}

	tests := []struct {
		name            string
		logInAs         ipnauth.Actor   // actor that initiates the interactive login
		urlExpectedBy   []ipnauth.Actor // watchers that must receive the URL
		urlUnexpectedBy []ipnauth.Actor // watchers that must not receive the URL
	}{
		{
			name:          "NoObservers",
			logInAs:       &ipnauth.TestActor{UID: "A"},
			urlExpectedBy: []ipnauth.Actor{}, // ensure that it does not panic if no one is watching
		},
		{
			name:          "SingleUser",
			logInAs:       &ipnauth.TestActor{UID: "A"},
			urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
		},
		{
			name:          "SameUser/TwoSessions/NoCID",
			logInAs:       &ipnauth.TestActor{UID: "A"},
			urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}, &ipnauth.TestActor{UID: "A"}},
		},
		{
			name:            "SameUser/TwoSessions/OneWithCID",
			logInAs:         &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")},
			urlExpectedBy:   []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
			urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
		},
		{
			name:            "SameUser/TwoSessions/BothWithCID",
			logInAs:         &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")},
			urlExpectedBy:   []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
			urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("456")}},
		},
		{
			name:            "DifferentUsers/NoCID",
			logInAs:         &ipnauth.TestActor{UID: "A"},
			urlExpectedBy:   []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
			urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B"}},
		},
		{
			name:            "DifferentUsers/SameCID",
			logInAs:         &ipnauth.TestActor{UID: "A"},
			urlExpectedBy:   []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
			urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B", CID: ipnauth.ClientIDFrom("123")}},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
				return newClient(tb, opts)
			})
			if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ControlURLSet: true, Prefs: ipn.Prefs{ControlURL: controlURL}}); err != nil {
				t.Fatalf("(*EditPrefs).Start(): %v", err)
			}
			if err := lb.Start(ipn.Options{}); err != nil {
				t.Fatalf("(*LocalBackend).Start(): %v", err)
			}

			// Register all watchers before initiating the login so none of
			// them can miss the notification.
			sessions := make([]*notificationWatcher, 0, len(tt.urlExpectedBy)+len(tt.urlUnexpectedBy))
			for _, actor := range tt.urlExpectedBy {
				session := newNotificationWatcher(t, lb, actor)
				session.watch(0, []wantedNotification{wantBrowseToURL})
				sessions = append(sessions, session)
			}
			for _, actor := range tt.urlUnexpectedBy {
				session := newNotificationWatcher(t, lb, actor)
				session.watch(0, nil, unexpectedBrowseToURL)
				sessions = append(sessions, session)
			}

			if err := lb.StartLoginInteractiveAs(context.Background(), tt.logInAs); err != nil {
				t.Fatal(err)
			}

			// Have the mock control client deliver the login URL.
			lb.cc.(*mockControl).send(nil, loginURL, false, nil)

			var wg sync.WaitGroup
			wg.Add(len(sessions))
			for _, sess := range sessions {
				go func() { // check all sessions in parallel
					sess.check()
					wg.Done()
				}()
			}
			wg.Wait()
		})
	}
}
2024-10-31 08:30:11 -07:00
// TestConfigFileReload tests that the LocalBackend reloads its configuration
// when the configuration file changes.
func TestConfigFileReload(t *testing.T) {
	type testCase struct {
		name    string
		initial *conffile.Config // config the backend is created with
		updated *conffile.Config // config written to disk before ReloadConfig
		checkFn func(*testing.T, *LocalBackend)
	}

	tests := []testCase{
		{
			name: "hostname_change",
			initial: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:  "alpha0",
					Hostname: ptr.To("initial-host"),
				},
			},
			updated: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:  "alpha0",
					Hostname: ptr.To("updated-host"),
				},
			},
			checkFn: func(t *testing.T, b *LocalBackend) {
				if got := b.Prefs().Hostname(); got != "updated-host" {
					t.Errorf("hostname = %q; want updated-host", got)
				}
			},
		},
		{
			name: "start_advertising_services",
			initial: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version: "alpha0",
				},
			},
			updated: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:           "alpha0",
					AdvertiseServices: []string{"svc:abc", "svc:def"},
				},
			},
			checkFn: func(t *testing.T, b *LocalBackend) {
				if got := b.Prefs().AdvertiseServices().AsSlice(); !reflect.DeepEqual(got, []string{"svc:abc", "svc:def"}) {
					t.Errorf("AdvertiseServices = %v; want [svc:abc, svc:def]", got)
				}
			},
		},
		{
			name: "change_advertised_services",
			initial: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:           "alpha0",
					AdvertiseServices: []string{"svc:abc", "svc:def"},
				},
			},
			updated: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:           "alpha0",
					AdvertiseServices: []string{"svc:abc", "svc:ghi"},
				},
			},
			checkFn: func(t *testing.T, b *LocalBackend) {
				if got := b.Prefs().AdvertiseServices().AsSlice(); !reflect.DeepEqual(got, []string{"svc:abc", "svc:ghi"}) {
					t.Errorf("AdvertiseServices = %v; want [svc:abc, svc:ghi]", got)
				}
			},
		},
		{
			name: "unset_advertised_services",
			initial: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version:           "alpha0",
					AdvertiseServices: []string{"svc:abc"},
				},
			},
			updated: &conffile.Config{
				Parsed: ipn.ConfigVAlpha{
					Version: "alpha0",
				},
			},
			checkFn: func(t *testing.T, b *LocalBackend) {
				if b.Prefs().AdvertiseServices().Len() != 0 {
					t.Errorf("got %d AdvertiseServices wants none", b.Prefs().AdvertiseServices().Len())
				}
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			dir := t.TempDir()
			path := filepath.Join(dir, "tailscale.conf")

			// Write initial config
			initialJSON, err := json.Marshal(tc.initial.Parsed)
			if err != nil {
				t.Fatal(err)
			}
			if err := os.WriteFile(path, initialJSON, 0644); err != nil {
				t.Fatal(err)
			}

			// Create backend with initial config
			tc.initial.Path = path
			tc.initial.Raw = initialJSON
			sys := tsd.NewSystem()
			sys.InitialConfig = tc.initial
			b := newTestLocalBackendWithSys(t, sys)

			// Update config file
			updatedJSON, err := json.Marshal(tc.updated.Parsed)
			if err != nil {
				t.Fatal(err)
			}
			if err := os.WriteFile(path, updatedJSON, 0644); err != nil {
				t.Fatal(err)
			}

			// Trigger reload
			if ok, err := b.ReloadConfig(); !ok || err != nil {
				t.Fatalf("ReloadConfig() = %v, %v; want true, nil", ok, err)
			}

			// Check outcome
			tc.checkFn(t, b)
		})
	}
}
2024-11-15 16:14:06 -05:00
// TestGetVIPServices tests that vipServicesFromPrefsLocked merges services
// advertised in prefs with services defined in the serve config: advertised
// services are marked Active, and served services report their port ranges.
func TestGetVIPServices(t *testing.T) {
	tests := []struct {
		name        string
		advertised  []string         // service names advertised via prefs
		serveConfig *ipn.ServeConfig // services defined by the serve config
		want        []*tailcfg.VIPService
	}{
		{
			"advertised-only",
			[]string{"svc:abc", "svc:def"},
			&ipn.ServeConfig{},
			[]*tailcfg.VIPService{
				{
					Name:   "svc:abc",
					Active: true,
				},
				{
					Name:   "svc:def",
					Active: true,
				},
			},
		},
		{
			"served-only",
			[]string{},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name:  "svc:abc",
					Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
				},
			},
		},
		{
			"served-and-advertised",
			[]string{"svc:abc"},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name:   "svc:abc",
					Active: true,
					Ports:  []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
				},
			},
		},
		{
			"served-and-advertised-different-service",
			[]string{"svc:def"},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name:  "svc:abc",
					Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
				},
				{
					Name:   "svc:def",
					Active: true,
				},
			},
		},
		{
			"served-with-port-ranges-one-range-single",
			[]string{},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{
						80: {HTTPS: true},
					}},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name:  "svc:abc",
					Ports: []tailcfg.ProtoPortRange{{Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 80}}},
				},
			},
		},
		{
			"served-with-port-ranges-one-range-multiple",
			[]string{},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{
						80: {HTTPS: true},
						81: {HTTPS: true},
						82: {HTTPS: true},
					}},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name:  "svc:abc",
					Ports: []tailcfg.ProtoPortRange{{Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 82}}},
				},
			},
		},
		{
			"served-with-port-ranges-multiple-ranges",
			[]string{},
			&ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {TCP: map[uint16]*ipn.TCPPortHandler{
						80:   {HTTPS: true},
						81:   {HTTPS: true},
						82:   {HTTPS: true},
						1212: {HTTPS: true},
						1213: {HTTPS: true},
						1214: {HTTPS: true},
					}},
				},
			},
			[]*tailcfg.VIPService{
				{
					Name: "svc:abc",
					Ports: []tailcfg.ProtoPortRange{
						{Proto: 6, Ports: tailcfg.PortRange{First: 80, Last: 82}},
						{Proto: 6, Ports: tailcfg.PortRange{First: 1212, Last: 1214}},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
				return newClient(tb, opts)
			})
			lb.serveConfig = tt.serveConfig.View()
			prefs := &ipn.Prefs{
				AdvertiseServices: tt.advertised,
			}
			got := lb.vipServicesFromPrefsLocked(prefs.View())
			// Sort by name for a deterministic comparison.
			slices.SortFunc(got, func(a, b *tailcfg.VIPService) int {
				return strings.Compare(a.Name.String(), b.Name.String())
			})
			if !reflect.DeepEqual(tt.want, got) {
				t.Logf("want:")
				for _, s := range tt.want {
					t.Logf("%+v", s)
				}
				t.Logf("got:")
				for _, s := range got {
					t.Logf("%+v", s)
				}
				t.Fail()
				return
			}
		})
	}
}
2024-11-22 08:45:53 -06:00
// TestUpdatePrefsOnSysPolicyChange tests that changing syspolicy settings
// updates the backend's prefs and sends a Prefs notification carrying the
// expected field changes — or no notification at all when the settings
// already match the current prefs.
func TestUpdatePrefsOnSysPolicyChange(t *testing.T) {
	const enableLogging = false

	type fieldChange struct {
		name string // prefs struct field name, looked up via reflection
		want any    // expected value of that field after the policy change
	}

	// wantPrefsChanges returns a [wantedNotification] matching any Prefs
	// notification; when one arrives, it verifies that the listed fields
	// have the wanted values.
	wantPrefsChanges := func(want ...fieldChange) *wantedNotification {
		return &wantedNotification{
			name: "Prefs",
			cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
				if n.Prefs != nil {
					prefs := reflect.Indirect(reflect.ValueOf(n.Prefs.AsStruct()))
					for _, f := range want {
						got := prefs.FieldByName(f.name).Interface()
						if !reflect.DeepEqual(got, f.want) {
							t.Errorf("%v: got %v; want %v", f.name, got, f.want)
						}
					}
				}
				return n.Prefs != nil
			},
		}
	}

	// unexpectedPrefsChange fails the test if any Prefs notification arrives
	// when none is expected.
	unexpectedPrefsChange := func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool {
		if n.Prefs != nil {
			t.Errorf("Unexpected Prefs: %v", n.Prefs.Pretty())
			return true
		}
		return false
	}

	tests := []struct {
		name           string
		initialPrefs   *ipn.Prefs
		stringSettings []source.TestSetting[string]
		want           *wantedNotification // nil means no Prefs notification is expected
	}{
		{
			name:           "ShieldsUp/True",
			stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")},
			want:           wantPrefsChanges(fieldChange{"ShieldsUp", true}),
		},
		{
			name:           "ShieldsUp/False",
			initialPrefs:   &ipn.Prefs{ShieldsUp: true},
			stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")},
			want:           wantPrefsChanges(fieldChange{"ShieldsUp", false}),
		},
		{
			name:           "ExitNodeID",
			stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")},
			want:           wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}),
		},
		{
			name:           "EnableRunExitNode",
			stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")},
			want:           wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}),
		},
		{
			name: "Multiple",
			initialPrefs: &ipn.Prefs{
				ExitNodeAllowLANAccess: true,
			},
			stringSettings: []source.TestSetting[string]{
				source.TestSettingOf(syspolicy.EnableServerMode, "always"),
				source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"),
				source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"),
			},
			want: wantPrefsChanges(
				fieldChange{"ForceDaemon", true},
				fieldChange{"ExitNodeAllowLANAccess", false},
				fieldChange{"ExitNodeIP", netip.MustParseAddr("127.0.0.1")},
			),
		},
		{
			name: "NoChange",
			initialPrefs: &ipn.Prefs{
				CorpDNS:         true,
				ExitNodeID:      "foo",
				AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()},
			},
			stringSettings: []source.TestSetting[string]{
				source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"),
				source.TestSettingOf(syspolicy.ExitNodeID, "foo"),
				source.TestSettingOf(syspolicy.EnableRunExitNode, "always"),
			},
			want: nil, // syspolicy settings match the preferences; no change notification is expected.
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			syspolicy.RegisterWellKnownSettingsForTest(t)
			store := source.NewTestStoreOf[string](t)
			syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store)

			lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
				return newClient(tb, opts)
			})
			if tt.initialPrefs != nil {
				lb.SetPrefsForTest(tt.initialPrefs)
			}
			if err := lb.Start(ipn.Options{}); err != nil {
				t.Fatalf("(*LocalBackend).Start(): %v", err)
			}

			nw := newNotificationWatcher(t, lb, &ipnauth.TestActor{})
			if tt.want != nil {
				nw.watch(0, []wantedNotification{*tt.want})
			} else {
				nw.watch(0, nil, unexpectedPrefsChange)
			}

			// Apply the policy settings after the watcher is registered so the
			// resulting notification (if any) is observed.
			store.SetStrings(tt.stringSettings...)

			nw.check()
		})
	}
}
2025-01-21 05:17:27 +00:00
2025-06-06 15:20:23 -04:00
// TestUpdateIngressAndServiceHashLocked tests that updateIngressAndServiceHashLocked
// updates the Hostinfo's IngressEnabled, WireIngress, and ServicesHash fields from
// the current serve config and prefs, and that it triggers an asynchronous update
// to control only when one of those fields actually changed.
func TestUpdateIngressAndServiceHashLocked(t *testing.T) {
	prefs := ipn.NewPrefs().View()
	// previousSC is used by cases with hasPreviousSC set to pre-populate the
	// Hostinfo's ServicesHash, simulating an already-advertised serve config.
	previousSC := &ipn.ServeConfig{
		Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
			"svc:abc": {Tun: true},
		},
	}
	tests := []struct {
		name              string
		hi                *tailcfg.Hostinfo
		hasPreviousSC     bool // whether to overwrite the ServeConfig hash in the Hostinfo using previousSC
		sc                *ipn.ServeConfig
		wantIngress       bool
		wantWireIngress   bool
		wantControlUpdate bool // whether an async update to control is expected
	}{
		{
			name: "no_hostinfo_no_serve_config",
			hi:   nil,
		},
		{
			name: "empty_hostinfo_no_serve_config",
			hi:   &tailcfg.Hostinfo{},
		},
		{
			name: "empty_hostinfo_funnel_enabled",
			hi:   &tailcfg.Hostinfo{},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": true,
				},
			},
			wantIngress:       true,
			wantWireIngress:   false, // implied by wantIngress
			wantControlUpdate: true,
		},
		{
			name: "empty_hostinfo_service_configured",
			hi:   &tailcfg.Hostinfo{},
			sc: &ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
			wantControlUpdate: true,
		},
		{
			name: "empty_hostinfo_funnel_disabled",
			hi:   &tailcfg.Hostinfo{},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": false,
				},
			},
			wantWireIngress:   true, // true if there is any AllowFunnel block
			wantControlUpdate: true,
		},
		{
			name: "empty_hostinfo_no_funnel_no_service",
			hi:   &tailcfg.Hostinfo{},
			sc: &ipn.ServeConfig{
				TCP: map[uint16]*ipn.TCPPortHandler{
					80: {HTTPS: true},
				},
			},
		},
		{
			name: "funnel_enabled_no_change",
			hi: &tailcfg.Hostinfo{
				IngressEnabled: true,
			},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": true,
				},
			},
			wantIngress:     true,
			wantWireIngress: false, // implied by wantIngress
		},
		{
			name:          "service_hash_no_change",
			hi:            &tailcfg.Hostinfo{},
			hasPreviousSC: true,
			sc: &ipn.ServeConfig{
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
		},
		{
			name: "funnel_disabled_no_change",
			hi: &tailcfg.Hostinfo{
				WireIngress: true,
			},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": false,
				},
			},
			wantWireIngress: true, // true if there is any AllowFunnel block
		},
		{
			name:              "service_got_removed",
			hi:                &tailcfg.Hostinfo{},
			hasPreviousSC:     true,
			sc:                &ipn.ServeConfig{},
			wantControlUpdate: true,
		},
		{
			name: "funnel_changes_to_disabled",
			hi: &tailcfg.Hostinfo{
				IngressEnabled: true,
			},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": false,
				},
			},
			wantWireIngress:   true, // true if there is any AllowFunnel block
			wantControlUpdate: true,
		},
		{
			name: "funnel_changes_to_enabled",
			hi: &tailcfg.Hostinfo{
				WireIngress: true,
			},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": true,
				},
			},
			wantIngress:       true,
			wantWireIngress:   false, // implied by wantIngress
			wantControlUpdate: true,
		},
		{
			name: "both_funnel_and_service_changes",
			hi: &tailcfg.Hostinfo{
				IngressEnabled: true,
			},
			sc: &ipn.ServeConfig{
				AllowFunnel: map[ipn.HostPort]bool{
					"tailnet.xyz:443": false,
				},
				Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
					"svc:abc": {Tun: true},
				},
			},
			wantWireIngress:   true, // true if there is any AllowFunnel block
			wantControlUpdate: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			b := newTestLocalBackend(t)
			b.hostinfo = tt.hi
			if tt.hasPreviousSC {
				b.mu.Lock()
				b.serveConfig = previousSC.View()
				b.hostinfo.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs))
				b.mu.Unlock()
			}
			b.serveConfig = tt.sc.View()

			// allDone is signaled once every goroutine started by the backend
			// has finished; AddDoneCallback returns an unregister func that is
			// deferred.
			allDone := make(chan bool, 1)
			defer b.goTracker.AddDoneCallback(func() {
				b.mu.Lock()
				defer b.mu.Unlock()
				if b.goTracker.RunningGoroutines() > 0 {
					return
				}
				select {
				case allDone <- true:
				default:
				}
			})()

			// A control update is detected as a change in the number of
			// goroutines started by the backend.
			was := b.goTracker.StartedGoroutines()
			b.updateIngressAndServiceHashLocked(prefs)

			if tt.hi != nil {
				if tt.hi.IngressEnabled != tt.wantIngress {
					t.Errorf("IngressEnabled = %v, want %v", tt.hi.IngressEnabled, tt.wantIngress)
				}
				if tt.hi.WireIngress != tt.wantWireIngress {
					t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress)
				}
				b.mu.Lock()
				svcHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs))
				b.mu.Unlock()
				if tt.hi.ServicesHash != svcHash {
					t.Errorf("ServicesHash = %v, want %v", tt.hi.ServicesHash, svcHash)
				}
			}

			startedGoroutine := b.goTracker.StartedGoroutines() != was
			if startedGoroutine != tt.wantControlUpdate {
				t.Errorf("control update triggered = %v, want %v", startedGoroutine, tt.wantControlUpdate)
			}
			if startedGoroutine {
				select {
				case <-time.After(5 * time.Second):
					t.Fatal("timed out waiting for goroutine to finish")
				case <-allDone:
				}
			}
		})
	}
}
2025-03-07 15:07:00 +00:00
// TestSrcCapPacketFilter tests that LocalBackend handles packet filters with
// SrcCaps instead of Srcs (IPs): traffic is accepted from a peer that has the
// capability named by the filter rule and dropped from a peer that doesn't.
func TestSrcCapPacketFilter(t *testing.T) {
	lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
		return newClient(tb, opts)
	})
	if err := lb.Start(ipn.Options{}); err != nil {
		t.Fatalf("(*LocalBackend).Start(): %v", err)
	}

	var k key.NodePublic
	must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261")))

	// Push a netmap with two peers: node 2 holds "cap-X", node 3 doesn't.
	// The packet filter accepts TCP to self (1.1.1.1:22) from holders of
	// "cap-X" only.
	controlClient := lb.cc.(*mockControl)
	controlClient.send(nil, "", false, &netmap.NetworkMap{
		SelfNode: (&tailcfg.Node{
			Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")},
		}).View(),
		Peers: []tailcfg.NodeView{
			(&tailcfg.Node{
				Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")},
				ID:        2,
				Key:       k,
				CapMap:    tailcfg.NodeCapMap{"cap-X": nil}, // node 2 has cap
			}).View(),
			(&tailcfg.Node{
				Addresses: []netip.Prefix{netip.MustParsePrefix("3.3.3.3/32")},
				ID:        3,
				Key:       k,
				CapMap:    tailcfg.NodeCapMap{}, // node 3 does not have the cap
			}).View(),
		},
		PacketFilter: []filtertype.Match{{
			IPProto: views.SliceOf([]ipproto.Proto{ipproto.TCP}),
			SrcCaps: []tailcfg.NodeCapability{"cap-X"}, // cap in packet filter rule
			Dsts: []filtertype.NetPortRange{{
				Net: netip.MustParsePrefix("1.1.1.1/32"),
				Ports: filtertype.PortRange{
					First: 22,
					Last:  22,
				},
			}},
		}},
	})

	f := lb.GetFilterForTest()
	res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP)
	if res != filter.Accept {
		t.Errorf("Check(2.2.2.2, ...) = %s, want %s", res, filter.Accept)
	}

	res = f.Check(netip.MustParseAddr("3.3.3.3"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP)
	if !res.IsDrop() {
		t.Error("IsDrop() for node without cap = false, want true")
	}
}
2025-05-07 17:01:40 +01:00
func TestDisplayMessages ( t * testing . T ) {
b := newTestLocalBackend ( t )
// Pretend we're in a map poll so health updates get processed
ht := b . HealthTracker ( )
ht . SetIPNState ( "NeedsLogin" , true )
ht . GotStreamedMapResponse ( )
2025-06-03 15:09:34 +01:00
b . mu . Lock ( )
defer b . mu . Unlock ( )
2025-05-07 17:01:40 +01:00
b . setNetMapLocked ( & netmap . NetworkMap {
DisplayMessages : map [ tailcfg . DisplayMessageID ] tailcfg . DisplayMessage {
"test-message" : {
Title : "Testing" ,
} ,
} ,
} )
state := ht . CurrentState ( )
2025-06-06 15:53:30 +01:00
wantID := health . WarnableCode ( "control-health.test-message" )
_ , ok := state . Warnings [ wantID ]
2025-05-07 17:01:40 +01:00
if ! ok {
2025-06-06 15:53:30 +01:00
t . Errorf ( "no warning found with id %q" , wantID )
2025-05-07 17:01:40 +01:00
}
}
// TestDisplayMessagesURLFilter tests that we filter out any URLs that are not
// valid as a pop browser URL (see [LocalBackend.validPopBrowserURL]).
func TestDisplayMessagesURLFilter(t *testing.T) {
	b := newTestLocalBackend(t)

	// Pretend we're in a map poll so health updates get processed
	ht := b.HealthTracker()
	ht.SetIPNState("NeedsLogin", true)
	ht.GotStreamedMapResponse()

	b.mu.Lock()
	defer b.mu.Unlock()
	b.setNetMapLocked(&netmap.NetworkMap{
		DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{
			"test-message": {
				Title:    "Testing",
				Severity: tailcfg.SeverityHigh,
				PrimaryAction: &tailcfg.DisplayMessageAction{
					URL:   "https://www.evil.com",
					Label: "Phishing Link",
				},
			},
		},
	})

	state := ht.CurrentState()
	wantID := health.WarnableCode("control-health.test-message")
	got, ok := state.Warnings[wantID]
	if !ok {
		t.Fatalf("no warning found with id %q", wantID)
	}

	// The expected state carries no action: the message's PrimaryAction URL
	// should have been filtered out.
	want := health.UnhealthyState{
		WarnableCode: wantID,
		Title:        "Testing",
		Severity:     health.SeverityHigh,
	}
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("Unexpected message content (-want/+got):\n%s", diff)
	}
}
// TestDisplayMessageIPNBus checks that we send health messages appropriately
// based on whether the watcher has sent the [ipn.NotifyHealthActions] watch
// option or not.
func TestDisplayMessageIPNBus(t *testing.T) {
	type test struct {
		name        string
		mask        ipn.NotifyWatchOpt // watch options the IPN bus client subscribes with
		wantWarning health.UnhealthyState
	}
	// One display message with a PrimaryAction; how that action reaches the
	// client depends on whether the client opted in to NotifyHealthActions.
	msgs := map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{
		"test-message": {
			Title:    "Message title",
			Text:     "Message text.",
			Severity: tailcfg.SeverityMedium,
			PrimaryAction: &tailcfg.DisplayMessageAction{
				URL:   "https://example.com",
				Label: "Learn more",
			},
		},
	}

	wantID := health.WarnableCode("control-health.test-message")

	for _, tt := range []test{
		{
			name: "older-client-no-actions",
			mask: 0, // no NotifyHealthActions: action must be folded into Text
			wantWarning: health.UnhealthyState{
				WarnableCode:  wantID,
				Severity:      health.SeverityMedium,
				Title:         "Message title",
				Text:          "Message text. Learn more: https://example.com", // PrimaryAction appended to text
				PrimaryAction: nil,                                             // PrimaryAction not included
			},
		},
		{
			name: "new-client-with-actions",
			mask: ipn.NotifyHealthActions, // client understands structured actions
			wantWarning: health.UnhealthyState{
				WarnableCode: wantID,
				Severity:     health.SeverityMedium,
				Title:        "Message title",
				Text:         "Message text.",
				PrimaryAction: &health.UnhealthyStateAction{
					URL:   "https://example.com",
					Label: "Learn more",
				},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
				return newClient(tb, opts)
			})
			// Register the watcher before starting the backend so the
			// notification triggered by the netmap below cannot be missed.
			ipnWatcher := newNotificationWatcher(t, lb, nil)
			ipnWatcher.watch(tt.mask, []wantedNotification{{
				name: fmt.Sprintf("warning with ID %q", wantID),
				// cond reports whether this notify is the one we're waiting
				// for; it also performs the detailed comparison when found.
				cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool {
					if n.Health == nil {
						return false
					}
					got, ok := n.Health.Warnings[wantID]
					if ok {
						if diff := cmp.Diff(tt.wantWarning, got); diff != "" {
							t.Errorf("unexpected warning details (-want/+got):\n%s", diff)
							return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting
						}
					} else {
						// Not ours yet; log what we did see to aid debugging.
						got := slices.Collect(maps.Keys(n.Health.Warnings))
						t.Logf("saw warnings: %v", got)
					}
					return ok
				},
			}})
			lb.SetPrefsForTest(&ipn.Prefs{
				ControlURL:  "https://localhost:1/",
				WantRunning: true,
				LoggedOut:   false,
			})
			if err := lb.Start(ipn.Options{}); err != nil {
				t.Fatalf("(*LocalBackend).Start(): %v", err)
			}
			cc := lb.cc.(*mockControl)
			// Assert that we are logged in and authorized, and also send our DisplayMessages
			cc.send(nil, "", true, &netmap.NetworkMap{
				SelfNode:        (&tailcfg.Node{MachineAuthorized: true}).View(),
				DisplayMessages: msgs,
			})
			// Tell the health tracker that we are in a map poll because
			// mockControl doesn't tell it
			lb.HealthTracker().GotStreamedMapResponse()
			// Assert that we got the expected notification
			ipnWatcher.check()
		})
	}
}
// checkError reports a test failure when got does not match want. Two errors
// match when both are nil, or when both are non-nil and either their messages
// are equal or errors.Is(got, want) holds. If fatal is true the mismatch is
// reported with Fatalf, otherwise with Errorf.
func checkError(tb testing.TB, got, want error, fatal bool) {
	tb.Helper()
	report := tb.Errorf
	if fatal {
		report = tb.Fatalf
	}
	switch {
	case got == nil && want == nil:
		return // both nil: match
	case got != nil && want != nil:
		if got.Error() == want.Error() || errors.Is(got, want) {
			return // same message or wrapped: match
		}
	}
	report("gotErr: %v; wantErr: %v", got, want)
}
// toStrings converts a slice of any string-kinded type to a []string.
// The result is always non-nil and has the same length as in.
func toStrings[T ~string](in []T) []string {
	out := make([]string, 0, len(in))
	for _, v := range in {
		out = append(out, string(v))
	}
	return out
}