types/netmap, all: use read-only tailcfg.NodeView in NetworkMap
Updates #8948

Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
committed by Brad Fitzpatrick
parent b040094b90
commit 58a4fd43d8
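The change below is largely mechanical: netmap.NetworkMap.Peers (and the signatures that consume it) switch from []*tailcfg.Node to the read-only []tailcfg.NodeView, so field reads become accessor calls (p.Name() instead of p.Name), prefix slices are walked via Addresses().LenIter()/At(i), and any mutation has to copy the node out and store a fresh view back. A minimal sketch of that access pattern, condensed from the flagExpiredPeers hunk below (expireInPlace is an illustrative helper name, not part of the commit):

package example

import (
	"time"

	"tailscale.com/tailcfg"
)

// expireInPlace shows the NodeView read/mutate pattern this diff adopts:
// reads go through accessor methods, and the only way to change a peer is
// to copy it out with AsStruct, edit the copy, and store a new View back.
func expireInPlace(peers []tailcfg.NodeView, now time.Time) {
	for i, p := range peers {
		if p.KeyExpiry().IsZero() || p.KeyExpiry().After(now) {
			continue // non-expiring or still-valid peer; nothing to do
		}
		mut := p.AsStruct() // mutable copy of the underlying tailcfg.Node
		mut.Expired = true
		peers[i] = mut.View() // replace the slice entry with a fresh read-only view
	}
}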
@@ -38,6 +38,14 @@ func ips(ss ...string) (ips []netip.Addr) {
return
}

+func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView {
+nv := make([]tailcfg.NodeView, len(v))
+for i, n := range v {
+nv[i] = n.View()
+}
+return nv
+}
+
func TestDNSConfigForNetmap(t *testing.T) {
tests := []struct {
name string
@@ -62,7 +70,7 @@ func TestDNSConfigForNetmap(t *testing.T) {
nm: &netmap.NetworkMap{
Name: "myname.net",
Addresses: ipps("100.101.101.101"),
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
{
Name: "peera.net",
Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001", "fe75::1002"),
@@ -75,7 +83,7 @@ func TestDNSConfigForNetmap(t *testing.T) {
Name: "v6-only.net",
Addresses: ipps("fe75::3"), // no IPv4, so we don't ignore IPv6
},
-},
+}),
},
prefs: &ipn.Prefs{},
want: &dns.Config{
@@ -96,7 +104,7 @@ func TestDNSConfigForNetmap(t *testing.T) {
nm: &netmap.NetworkMap{
Name: "myname.net",
Addresses: ipps("fe75::1"),
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
{
Name: "peera.net",
Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001"),
@@ -109,7 +117,7 @@ func TestDNSConfigForNetmap(t *testing.T) {
Name: "v6-only.net",
Addresses: ipps("fe75::3"), // no IPv4, so we don't ignore IPv6
},
-},
+}),
},
prefs: &ipn.Prefs{},
want: &dns.Config{
@@ -87,24 +87,26 @@ func (em *expiryManager) flagExpiredPeers(netmap *netmap.NetworkMap, localNow ti
return
}

-for _, peer := range netmap.Peers {
+for i, peer := range netmap.Peers {
// Nodes that don't expire have KeyExpiry set to the zero time;
// skip those and peers that are already marked as expired
// (e.g. from control).
-if peer.KeyExpiry.IsZero() || peer.KeyExpiry.After(controlNow) {
-delete(em.previouslyExpired, peer.StableID)
+if peer.KeyExpiry().IsZero() || peer.KeyExpiry().After(controlNow) {
+delete(em.previouslyExpired, peer.StableID())
continue
-} else if peer.Expired {
+} else if peer.Expired() {
continue
}

-if !em.previouslyExpired[peer.StableID] {
-em.logf("[v1] netmap: flagExpiredPeers: clearing expired peer %v", peer.StableID)
-em.previouslyExpired[peer.StableID] = true
+if !em.previouslyExpired[peer.StableID()] {
+em.logf("[v1] netmap: flagExpiredPeers: clearing expired peer %v", peer.StableID())
+em.previouslyExpired[peer.StableID()] = true
}

+mut := peer.AsStruct()
+
// Actually mark the node as expired
-peer.Expired = true
+mut.Expired = true

// Control clears the Endpoints and DERP fields of expired
// nodes; do so here as well. The Expired bool is the correct
@@ -113,12 +115,14 @@ func (em *expiryManager) flagExpiredPeers(netmap *netmap.NetworkMap, localNow ti
// NOTE: this is insufficient to actually break connectivity,
// since we discover endpoints via DERP, and due to DERP return
// path optimization.
-peer.Endpoints = nil
-peer.DERP = ""
+mut.Endpoints = nil
+mut.DERP = ""

// Defense-in-depth: break the node's public key as well, in
// case something tries to communicate.
-peer.Key = key.NodePublicWithBadOldPrefix(peer.Key)
+mut.Key = key.NodePublicWithBadOldPrefix(peer.Key())
+
+netmap.Peers[i] = mut.View()
}
}

@@ -144,13 +148,13 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim

var nextExpiry time.Time // zero if none
for _, peer := range nm.Peers {
-if peer.KeyExpiry.IsZero() {
+if peer.KeyExpiry().IsZero() {
continue // tagged node
-} else if peer.Expired {
+} else if peer.Expired() {
// Peer already expired; Expired is set by the
// flagExpiredPeers function, above.
continue
-} else if peer.KeyExpiry.Before(controlNow) {
+} else if peer.KeyExpiry().Before(controlNow) {
// This peer already expired, and peer.Expired
// isn't set for some reason. Skip this node.
continue
@@ -160,8 +164,8 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim
// an expiry; otherwise, only update if this node's expiry is
// sooner than the currently-stored one (since we want the
// soonest-occurring expiry time).
-if nextExpiry.IsZero() || peer.KeyExpiry.Before(nextExpiry) {
-nextExpiry = peer.KeyExpiry
+if nextExpiry.IsZero() || peer.KeyExpiry().Before(nextExpiry) {
+nextExpiry = peer.KeyExpiry()
}
}

@@ -44,38 +44,38 @@ func TestFlagExpiredPeers(t *testing.T) {
name string
controlTime *time.Time
netmap *netmap.NetworkMap
-want []*tailcfg.Node
+want []tailcfg.NodeView
}{
{
name: "no_expiry",
controlTime: &now,
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeInFuture),
-},
+}),
},
-want: []*tailcfg.Node{
+want: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeInFuture),
-},
+}),
},
{
name: "expiry",
controlTime: &now,
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeInPast),
-},
+}),
},
-want: []*tailcfg.Node{
+want: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeInPast, func(n *tailcfg.Node) {
n.Expired = true
n.Key = expiredKey
}),
-},
+}),
},
{
name: "bad_ControlTime",
@@ -83,29 +83,29 @@ func TestFlagExpiredPeers(t *testing.T) {
controlTime: &timeBeforeEpoch,

netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeBeforeEpoch.Add(-1*time.Hour)), // before ControlTime
-},
+}),
},
-want: []*tailcfg.Node{
+want: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeBeforeEpoch.Add(-1*time.Hour)), // should have expired, but ControlTime is before epoch
-},
+}),
},
{
name: "tagged_node",
controlTime: &now,
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", time.Time{}), // tagged node; zero expiry
-},
+}),
},
-want: []*tailcfg.Node{
+want: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", time.Time{}), // not expired
-},
+}),
},
}
for _, tt := range tests {
@@ -147,10 +147,10 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "no_expiry",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", noExpiry),
n(2, "bar", noExpiry),
-},
+}),
SelfNode: n(3, "self", noExpiry),
},
want: noExpiry,
@@ -158,10 +158,10 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "future_expiry_from_peer",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", noExpiry),
n(2, "bar", timeInFuture),
-},
+}),
SelfNode: n(3, "self", noExpiry),
},
want: timeInFuture,
@@ -169,10 +169,10 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "future_expiry_from_self",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", noExpiry),
n(2, "bar", noExpiry),
-},
+}),
SelfNode: n(3, "self", timeInFuture),
},
want: timeInFuture,
@@ -180,10 +180,10 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "future_expiry_from_multiple_peers",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
n(2, "bar", timeInMoreFuture),
-},
+}),
SelfNode: n(3, "self", noExpiry),
},
want: timeInFuture,
@@ -191,9 +191,9 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "future_expiry_from_peer_and_self",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInMoreFuture),
-},
+}),
SelfNode: n(2, "self", timeInFuture),
},
want: timeInFuture,
@@ -201,7 +201,7 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "only_self",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{},
+Peers: nodeViews([]*tailcfg.Node{}),
SelfNode: n(1, "self", timeInFuture),
},
want: timeInFuture,
@@ -209,9 +209,9 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "peer_already_expired",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInPast),
-},
+}),
SelfNode: n(2, "self", timeInFuture),
},
want: timeInFuture,
@@ -219,9 +219,9 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "self_already_expired",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
-},
+}),
SelfNode: n(2, "self", timeInPast),
},
want: timeInFuture,
@@ -229,9 +229,9 @@ func TestNextPeerExpiry(t *testing.T) {
{
name: "all_nodes_already_expired",
netmap: &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInPast),
-},
+}),
SelfNode: n(2, "self", timeInPast),
},
want: noExpiry,
@@ -263,9 +263,9 @@ func TestNextPeerExpiry(t *testing.T) {
// If we don't adjust for the local time, this would return a
// time in the past.
nm := &netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInPast),
-},
+}),
}
got := em.nextPeerExpiry(nm, now)
want := now.Add(30 * time.Second)
@@ -275,24 +275,24 @@ func TestNextPeerExpiry(t *testing.T) {
})
}

-func formatNodes(nodes []*tailcfg.Node) string {
+func formatNodes(nodes []tailcfg.NodeView) string {
var sb strings.Builder
for i, n := range nodes {
if i > 0 {
sb.WriteString(", ")
}
-fmt.Fprintf(&sb, "(%d, %q", n.ID, n.Name)
+fmt.Fprintf(&sb, "(%d, %q", n.ID(), n.Name())

-if n.Online != nil {
-fmt.Fprintf(&sb, ", online=%v", *n.Online)
+if n.Online() != nil {
+fmt.Fprintf(&sb, ", online=%v", *n.Online())
}
-if n.LastSeen != nil {
-fmt.Fprintf(&sb, ", lastSeen=%v", n.LastSeen.Unix())
+if n.LastSeen() != nil {
+fmt.Fprintf(&sb, ", lastSeen=%v", n.LastSeen().Unix())
}
-if n.Key != (key.NodePublic{}) {
-fmt.Fprintf(&sb, ", key=%v", n.Key.String())
+if n.Key() != (key.NodePublic{}) {
+fmt.Fprintf(&sb, ", key=%v", n.Key().String())
}
-if n.Expired {
+if n.Expired() {
fmt.Fprintf(&sb, ", expired=true")
}
sb.WriteString(")")
@@ -204,7 +204,7 @@ type LocalBackend struct {
// netMap is not mutated in-place once set.
netMap *netmap.NetworkMap
nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil
-nodeByAddr map[netip.Addr]*tailcfg.Node
+nodeByAddr map[netip.Addr]tailcfg.NodeView
activeLogin string // last logged LoginName from netMap
engineStatus ipn.EngineStatus
endpoints []tailcfg.Endpoint
@@ -684,13 +684,13 @@ func (b *LocalBackend) updateStatus(sb *ipnstate.StatusBuilder, extraLocked func
if !prefs.ExitNodeID().IsZero() {
if exitPeer, ok := b.netMap.PeerWithStableID(prefs.ExitNodeID()); ok {
var online = false
-if exitPeer.Online != nil {
-online = *exitPeer.Online
+if v := exitPeer.Online(); v != nil {
+online = *v
}
s.ExitNodeStatus = &ipnstate.ExitNodeStatus{
ID: prefs.ExitNodeID(),
Online: online,
-TailscaleIPs: exitPeer.Addresses,
+TailscaleIPs: exitPeer.Addresses().AsSlice(),
}
}
}
@@ -705,7 +705,7 @@ func (b *LocalBackend) updateStatus(sb *ipnstate.StatusBuilder, extraLocked func
ss.DNSName = b.netMap.Name
ss.UserID = b.netMap.User
if sn := b.netMap.SelfNode; sn != nil {
-peerStatusFromNode(ss, sn)
+peerStatusFromNode(ss, sn.View())
if c := sn.Capabilities; len(c) > 0 {
ss.Capabilities = append([]string(nil), c...)
}
@@ -735,28 +735,30 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) {
exitNodeID := b.pm.CurrentPrefs().ExitNodeID()
for _, p := range b.netMap.Peers {
var lastSeen time.Time
-if p.LastSeen != nil {
-lastSeen = *p.LastSeen
+if p.LastSeen() != nil {
+lastSeen = *p.LastSeen()
}
-var tailscaleIPs = make([]netip.Addr, 0, len(p.Addresses))
-for _, addr := range p.Addresses {
+var tailscaleIPs = make([]netip.Addr, 0, p.Addresses().Len())
+for i := range p.Addresses().LenIter() {
+addr := p.Addresses().At(i)
if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) {
tailscaleIPs = append(tailscaleIPs, addr.Addr())
}
}
+online := p.Online()
ps := &ipnstate.PeerStatus{
InNetworkMap: true,
-UserID: p.User,
+UserID: p.User(),
TailscaleIPs: tailscaleIPs,
-HostName: p.Hostinfo.Hostname(),
-DNSName: p.Name,
-OS: p.Hostinfo.OS(),
+HostName: p.Hostinfo().Hostname(),
+DNSName: p.Name(),
+OS: p.Hostinfo().OS(),
LastSeen: lastSeen,
-Online: p.Online != nil && *p.Online,
-ShareeNode: p.Hostinfo.ShareeNode(),
-ExitNode: p.StableID != "" && p.StableID == exitNodeID,
-SSH_HostKeys: p.Hostinfo.SSH_HostKeys().AsSlice(),
-Location: p.Hostinfo.Location(),
+Online: online != nil && *online,
+ShareeNode: p.Hostinfo().ShareeNode(),
+ExitNode: p.StableID() != "" && p.StableID() == exitNodeID,
+SSH_HostKeys: p.Hostinfo().SSH_HostKeys().AsSlice(),
+Location: p.Hostinfo().Location(),
}
peerStatusFromNode(ps, p)

@@ -767,29 +769,29 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) {
if u := peerAPIURL(nodeIP(p, netip.Addr.Is6), p6); u != "" {
ps.PeerAPIURL = append(ps.PeerAPIURL, u)
}
-sb.AddPeer(p.Key, ps)
+sb.AddPeer(p.Key(), ps)
}
}

// peerStatusFromNode copies fields that exist in the Node struct for
// current node and peers into the provided PeerStatus.
-func peerStatusFromNode(ps *ipnstate.PeerStatus, n *tailcfg.Node) {
-ps.ID = n.StableID
-ps.Created = n.Created
-ps.ExitNodeOption = tsaddr.ContainsExitRoutes(views.SliceOf(n.AllowedIPs))
-if n.Tags != nil {
-v := views.SliceOf(n.Tags)
+func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) {
+ps.ID = n.StableID()
+ps.Created = n.Created()
+ps.ExitNodeOption = tsaddr.ContainsExitRoutes(n.AllowedIPs())
+if n.Tags().Len() != 0 {
+v := n.Tags()
ps.Tags = &v
}
-if n.PrimaryRoutes != nil {
-v := views.SliceOf(n.PrimaryRoutes)
+if n.PrimaryRoutes().Len() != 0 {
+v := n.PrimaryRoutes()
ps.PrimaryRoutes = &v
}

-if n.Expired {
+if n.Expired() {
ps.Expired = true
}
-if t := n.KeyExpiry; !t.IsZero() {
+if t := n.KeyExpiry(); !t.IsZero() {
t = t.Round(time.Second)
ps.KeyExpiry = &t
}
@@ -798,7 +800,8 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n *tailcfg.Node) {
// WhoIs reports the node and user who owns the node with the given IP:port.
// If the IP address is a Tailscale IP, the provided port may be 0.
// If ok == true, n and u are valid.
-func (b *LocalBackend) WhoIs(ipp netip.AddrPort) (n *tailcfg.Node, u tailcfg.UserProfile, ok bool) {
+func (b *LocalBackend) WhoIs(ipp netip.AddrPort) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) {
+var zero tailcfg.NodeView
b.mu.Lock()
defer b.mu.Unlock()
n, ok = b.nodeByAddr[ipp.Addr()]
@@ -808,16 +811,16 @@ func (b *LocalBackend) WhoIs(ipp netip.AddrPort) (n *tailcfg.Node, u tailcfg.Use
ip, ok = b.e.WhoIsIPPort(ipp)
}
if !ok {
-return nil, u, false
+return zero, u, false
}
n, ok = b.nodeByAddr[ip]
if !ok {
-return nil, u, false
+return zero, u, false
}
}
-u, ok = b.netMap.UserProfiles[n.User]
+u, ok = b.netMap.UserProfiles[n.User()]
if !ok {
-return nil, u, false
+return zero, u, false
}
return n, u, true
}
@@ -1114,13 +1117,14 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool)
}

for _, peer := range nm.Peers {
-for _, addr := range peer.Addresses {
+for i := range peer.Addresses().LenIter() {
+addr := peer.Addresses().At(i)
if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP {
continue
}
// Found the node being referenced, upgrade prefs to
// reference it directly for next time.
-prefs.ExitNodeID = peer.StableID
+prefs.ExitNodeID = peer.StableID()
prefs.ExitNodeIP = netip.Addr{}
return true
}
@@ -1597,16 +1601,16 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P
//
// If this reports true, the packet filter is invalid (the server is either broken
// or malicious) and should be ignored for safety.
-func packetFilterPermitsUnlockedNodes(peers []*tailcfg.Node, packetFilter []filter.Match) bool {
+func packetFilterPermitsUnlockedNodes(peers []tailcfg.NodeView, packetFilter []filter.Match) bool {
var b netipx.IPSetBuilder
var numUnlocked int
for _, p := range peers {
-if !p.UnsignedPeerAPIOnly {
+if !p.UnsignedPeerAPIOnly() {
continue
}
numUnlocked++
-for _, a := range p.AllowedIPs { // not only addresses!
-b.AddPrefix(a)
+for i := range p.AllowedIPs().LenIter() { // not only addresses!
+b.AddPrefix(p.AllowedIPs().At(i))
}
}
if numUnlocked == 0 {
@@ -1764,11 +1768,11 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet,

// dnsCIDRsEqual determines whether two CIDR lists are equal
// for DNS map construction purposes (that is, only the first entry counts).
-func dnsCIDRsEqual(newAddr, oldAddr []netip.Prefix) bool {
-if len(newAddr) != len(oldAddr) {
+func dnsCIDRsEqual(newAddr, oldAddr views.Slice[netip.Prefix]) bool {
+if newAddr.Len() != oldAddr.Len() {
return false
}
-if len(newAddr) == 0 || newAddr[0] == oldAddr[0] {
+if newAddr.Len() == 0 || newAddr.At(0) == oldAddr.At(0) {
return true
}
return false
@@ -1792,16 +1796,16 @@ func dnsMapsEqual(new, old *netmap.NetworkMap) bool {
if new.Name != old.Name {
return false
}
-if !dnsCIDRsEqual(new.Addresses, old.Addresses) {
+if !dnsCIDRsEqual(views.SliceOf(new.Addresses), views.SliceOf(old.Addresses)) {
return false
}

for i, newPeer := range new.Peers {
oldPeer := old.Peers[i]
-if newPeer.Name != oldPeer.Name {
+if newPeer.Name() != oldPeer.Name() {
return false
}
-if !dnsCIDRsEqual(newPeer.Addresses, oldPeer.Addresses) {
+if !dnsCIDRsEqual(newPeer.Addresses(), oldPeer.Addresses()) {
return false
}
}
@@ -2418,8 +2422,8 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg
if err != nil {
pr.Err = err.Error()
}
-if node != nil {
-pr.NodeName = node.Name
+if node.Valid() {
+pr.NodeName = node.Name()
}
return pr, nil
}
@@ -2438,36 +2442,37 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg
}
}

-func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer *tailcfg.Node, peerBase string, err error) {
+func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer tailcfg.NodeView, peerBase string, err error) {
+var zero tailcfg.NodeView
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
nm := b.NetMap()
if nm == nil {
-return nil, "", errors.New("no netmap")
+return zero, "", errors.New("no netmap")
}
peer, ok := nm.PeerByTailscaleIP(ip)
if !ok {
-return nil, "", fmt.Errorf("no peer found with Tailscale IP %v", ip)
+return zero, "", fmt.Errorf("no peer found with Tailscale IP %v", ip)
}
-if peer.Expired {
-return nil, "", errors.New("peer's node key has expired")
+if peer.Expired() {
+return zero, "", errors.New("peer's node key has expired")
}
base := peerAPIBase(nm, peer)
if base == "" {
-return nil, "", fmt.Errorf("no PeerAPI base found for peer %v (%v)", peer.ID, ip)
+return zero, "", fmt.Errorf("no PeerAPI base found for peer %v (%v)", peer.ID(), ip)
}
outReq, err := http.NewRequestWithContext(ctx, "HEAD", base, nil)
if err != nil {
-return nil, "", err
+return zero, "", err
}
tr := b.Dialer().PeerAPITransport()
res, err := tr.RoundTrip(outReq)
if err != nil {
-return nil, "", err
+return zero, "", err
}
defer res.Body.Close() // but unnecessary on HEAD responses
if res.StatusCode != http.StatusOK {
-return nil, "", fmt.Errorf("HTTP status %v", res.Status)
+return zero, "", fmt.Errorf("HTTP status %v", res.Status)
}
return peer, base, nil
}
@@ -3098,17 +3103,24 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, prefs ipn.PrefsView, logf logger.
// isn't configured to make MagicDNS resolution truly
// magic. Details in
// https://github.com/tailscale/tailscale/issues/1886.
-set := func(name string, addrs []netip.Prefix) {
-if len(addrs) == 0 || name == "" {
+set := func(name string, addrs views.Slice[netip.Prefix]) {
+if addrs.Len() == 0 || name == "" {
return
}
fqdn, err := dnsname.ToFQDN(name)
if err != nil {
return // TODO: propagate error?
}
-have4 := slices.ContainsFunc(addrs, tsaddr.PrefixIs4)
+var have4 bool
+for i := range addrs.LenIter() {
+if addrs.At(i).Addr().Is4() {
+have4 = true
+break
+}
+}
var ips []netip.Addr
-for _, addr := range addrs {
+for i := range addrs.LenIter() {
+addr := addrs.At(i)
if selfV6Only {
if addr.Addr().Is6() {
ips = append(ips, addr.Addr())
@@ -3130,9 +3142,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, prefs ipn.PrefsView, logf logger.
}
dcfg.Hosts[fqdn] = ips
}
-set(nm.Name, nm.Addresses)
+set(nm.Name, views.SliceOf(nm.Addresses))
for _, peer := range nm.Peers {
-set(peer.Name, peer.Addresses)
+set(peer.Name(), peer.Addresses())
}
for _, rec := range nm.DNS.ExtraRecords {
switch rec.Type {
@@ -3995,28 +4007,28 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {

// Update the nodeByAddr index.
if b.nodeByAddr == nil {
-b.nodeByAddr = map[netip.Addr]*tailcfg.Node{}
+b.nodeByAddr = map[netip.Addr]tailcfg.NodeView{}
}
// First pass, mark everything unwanted.
for k := range b.nodeByAddr {
-b.nodeByAddr[k] = nil
+b.nodeByAddr[k] = tailcfg.NodeView{}
}
-addNode := func(n *tailcfg.Node) {
-for _, ipp := range n.Addresses {
-if ipp.IsSingleIP() {
+addNode := func(n tailcfg.NodeView) {
+for i := range n.Addresses().LenIter() {
+if ipp := n.Addresses().At(i); ipp.IsSingleIP() {
b.nodeByAddr[ipp.Addr()] = n
}
}
}
if nm.SelfNode != nil {
-addNode(nm.SelfNode)
+addNode(nm.SelfNode.View())
}
for _, p := range nm.Peers {
addNode(p)
}
// Third pass, actually delete the unwanted items.
for k, v := range b.nodeByAddr {
-if v == nil {
+if !v.Valid() {
delete(b.nodeByAddr, k)
}
}
@@ -4293,7 +4305,7 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) {
continue
}
ret = append(ret, &apitype.FileTarget{
-Node: p,
+Node: p.AsStruct(),
PeerAPIURL: peerAPI,
})
}
@@ -4306,15 +4318,15 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) {
// the netmap.
//
// b.mu must be locked.
-func (b *LocalBackend) peerIsTaildropTargetLocked(p *tailcfg.Node) bool {
-if b.netMap == nil || p == nil {
+func (b *LocalBackend) peerIsTaildropTargetLocked(p tailcfg.NodeView) bool {
+if b.netMap == nil || !p.Valid() {
return false
}
-if b.netMap.User == p.User {
+if b.netMap.User == p.User() {
return true
}
-if len(p.Addresses) > 0 &&
-b.peerHasCapLocked(p.Addresses[0].Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
+if p.Addresses().Len() > 0 &&
+b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
// Explicitly noted in the netmap ACL caps as a target.
return true
}
@@ -4374,9 +4386,9 @@ func (b *LocalBackend) registerIncomingFile(inf *incomingFile, active bool) {
}
}

-func peerAPIPorts(peer *tailcfg.Node) (p4, p6 uint16) {
-svcs := peer.Hostinfo.Services()
-for i, n := 0, svcs.Len(); i < n; i++ {
+func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) {
+svcs := peer.Hostinfo().Services()
+for i := range svcs.LenIter() {
s := svcs.At(i)
switch s.Proto {
case tailcfg.PeerAPI4:
@@ -4402,8 +4414,8 @@ func peerAPIURL(ip netip.Addr, port uint16) string {
// peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI.
// It returns the empty string if the peer doesn't support the peerapi
// or there's no matching address family based on the netmap's own addresses.
-func peerAPIBase(nm *netmap.NetworkMap, peer *tailcfg.Node) string {
-if nm == nil || peer == nil || !peer.Hostinfo.Valid() {
+func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string {
+if nm == nil || !peer.Valid() || !peer.Hostinfo().Valid() {
return ""
}

@@ -4429,8 +4441,9 @@ func peerAPIBase(nm *netmap.NetworkMap, peer *tailcfg.Node) string {
return ""
}

-func nodeIP(n *tailcfg.Node, pred func(netip.Addr) bool) netip.Addr {
-for _, a := range n.Addresses {
+func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr {
+for i := range n.Addresses().LenIter() {
+a := n.Addresses().At(i)
if a.IsSingleIP() && pred(a.Addr()) {
return a.Addr()
}
@@ -4540,15 +4553,15 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, exitNodeID tailcfg.StableNodeID)
return "", false
}
for _, p := range nm.Peers {
-if p.StableID == exitNodeID && peerCanProxyDNS(p) {
+if p.StableID() == exitNodeID && peerCanProxyDNS(p) {
return peerAPIBase(nm, p) + "/dns-query", true
}
}
return "", false
}

-func peerCanProxyDNS(p *tailcfg.Node) bool {
-if p.Cap >= 26 {
+func peerCanProxyDNS(p tailcfg.NodeView) bool {
+if p.Cap() >= 26 {
// Actually added at 25
// (https://github.com/tailscale/tailscale/blob/3ae6f898cfdb58fd0e30937147dd6ce28c6808dd/tailcfg/tailcfg.go#L51)
// so anything >= 26 can do it.
@@ -4556,10 +4569,9 @@ func peerCanProxyDNS(p *tailcfg.Node) bool {
}
// If p.Cap is not populated (e.g. older control server), then do the old
// thing of searching through services.
-services := p.Hostinfo.Services()
-for i, n := 0, services.Len(); i < n; i++ {
-s := services.At(i)
-if s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 {
+services := p.Hostinfo().Services()
+for i := range services.LenIter() {
+if s := services.At(i); s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 {
return true
}
}
@@ -87,46 +87,46 @@ func TestNetworkMapCompare(t *testing.T) {
},
{
"Peers identical",
-&netmap.NetworkMap{Peers: []*tailcfg.Node{}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{})},
true,
},
{
"Peer list length",
// length of Peers list differs
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{}}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{}})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{})},
false,
},
{
"Node names identical",
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{Name: "A"}}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{Name: "A"}}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{Name: "A"}})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{Name: "A"}})},
true,
},
{
"Node names differ",
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{Name: "A"}}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{Name: "B"}}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{Name: "A"}})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{Name: "B"}})},
false,
},
{
"Node lists identical",
-&netmap.NetworkMap{Peers: []*tailcfg.Node{node1, node1}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{node1, node1}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{node1, node1})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{node1, node1})},
true,
},
{
"Node lists differ",
-&netmap.NetworkMap{Peers: []*tailcfg.Node{node1, node1}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{node1, node2}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{node1, node1})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{node1, node2})},
false,
},
{
"Node Users differ",
// User field is not checked.
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{User: 0}}},
-&netmap.NetworkMap{Peers: []*tailcfg.Node{{User: 1}}},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{User: 0}})},
+&netmap.NetworkMap{Peers: nodeViews([]*tailcfg.Node{{User: 1}})},
true,
},
}
@@ -483,7 +483,7 @@ func TestPeerAPIBase(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-got := peerAPIBase(tt.nm, tt.peer)
+got := peerAPIBase(tt.nm, tt.peer.View())
if got != tt.want {
t.Errorf("got %q; want %q", got, tt.want)
}
@@ -758,7 +758,7 @@ func TestPacketFilterPermitsUnlockedNodes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-if got := packetFilterPermitsUnlockedNodes(tt.peers, tt.filter); got != tt.want {
+if got := packetFilterPermitsUnlockedNodes(nodeViews(tt.peers), tt.filter); got != tt.want {
t.Errorf("got %v, want %v", got, tt.want)
}
})
@@ -69,15 +69,16 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {

var toDelete map[int]bool // peer index => true
for i, p := range nm.Peers {
-if p.UnsignedPeerAPIOnly {
+if p.UnsignedPeerAPIOnly() {
// Not subject to tailnet lock.
continue
}
-if len(p.KeySignature) == 0 {
+keySig := tkatype.MarshaledSignature(p.KeySignature().StringCopy()) // TODO(bradfitz,maisem): this is unfortunate. Change tkatype.MarshaledSignature to a string for viewer?
+if len(keySig) == 0 {
b.logf("Network lock is dropping peer %v(%v) due to missing signature", p.ID, p.StableID)
mak.Set(&toDelete, i, true)
} else {
-if err := b.tka.authority.NodeKeyAuthorized(p.Key, p.KeySignature); err != nil {
+if err := b.tka.authority.NodeKeyAuthorized(p.Key(), keySig); err != nil {
b.logf("Network lock is dropping peer %v(%v) due to failed signature check: %v", p.ID, p.StableID, err)
mak.Set(&toDelete, i, true)
}
@@ -86,7 +87,7 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {

// nm.Peers is ordered, so deletion must be order-preserving.
if len(toDelete) > 0 {
-peers := make([]*tailcfg.Node, 0, len(nm.Peers))
+peers := make([]tailcfg.NodeView, 0, len(nm.Peers))
filtered := make([]ipnstate.TKAFilteredPeer, 0, len(toDelete))
for i, p := range nm.Peers {
if !toDelete[i] {
@@ -94,13 +95,14 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
} else {
// Record information about the node we filtered out.
fp := ipnstate.TKAFilteredPeer{
-Name: p.Name,
-ID: p.ID,
-StableID: p.StableID,
-TailscaleIPs: make([]netip.Addr, len(p.Addresses)),
-NodeKey: p.Key,
+Name: p.Name(),
+ID: p.ID(),
+StableID: p.StableID(),
+TailscaleIPs: make([]netip.Addr, p.Addresses().Len()),
+NodeKey: p.Key(),
}
-for i, addr := range p.Addresses {
+for i := range p.Addresses().LenIter() {
+addr := p.Addresses().At(i)
if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) {
fp.TailscaleIPs[i] = addr.Addr()
}
@@ -558,26 +558,26 @@ func TestTKAFilterNetmap(t *testing.T) {
t.Fatal(err)
}

-nm := netmap.NetworkMap{
-Peers: []*tailcfg.Node{
+nm := &netmap.NetworkMap{
+Peers: nodeViews([]*tailcfg.Node{
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
{ID: 2, Key: n2.Public(), KeySignature: nil}, // missing sig
{ID: 3, Key: n3.Public(), KeySignature: n1GoodSig.Serialize()}, // someone elses sig
{ID: 4, Key: n4.Public(), KeySignature: n4Sig.Serialize()}, // messed-up signature
{ID: 5, Key: n5.Public(), KeySignature: n5GoodSig.Serialize()},
-},
+}),
}

b := &LocalBackend{
logf: t.Logf,
tka: &tkaState{authority: authority},
}
-b.tkaFilterNetmapLocked(&nm)
+b.tkaFilterNetmapLocked(nm)

-want := []*tailcfg.Node{
+want := nodeViews([]*tailcfg.Node{
{ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()},
{ID: 5, Key: n5.Public(), KeySignature: n5GoodSig.Serialize()},
-}
+})
nodePubComparer := cmp.Comparer(func(x, y key.NodePublic) bool {
return x.Raw32() == y.Raw32()
})
@@ -576,7 +576,7 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) {
}
h := &peerAPIHandler{
ps: pln.ps,
-isSelf: nm.SelfNode.User == peerNode.User,
+isSelf: nm.SelfNode.User == peerNode.User(),
remoteAddr: src,
selfNode: nm.SelfNode,
peerNode: peerNode,
@@ -597,7 +597,7 @@ type peerAPIHandler struct {
remoteAddr netip.AddrPort
isSelf bool // whether peerNode is owned by same user as this node
selfNode *tailcfg.Node // this node; always non-nil
-peerNode *tailcfg.Node // peerNode is who's making the request
+peerNode tailcfg.NodeView // peerNode is who's making the request
peerUser tailcfg.UserProfile // profile of peerNode
}

@@ -608,8 +608,8 @@ func (h *peerAPIHandler) logf(format string, a ...any) {
// isAddressValid reports whether addr is a valid destination address for this
// node originating from the peer.
func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool {
-if h.peerNode.SelfNodeV4MasqAddrForThisPeer != nil {
-return *h.peerNode.SelfNodeV4MasqAddrForThisPeer == addr
+if v := h.peerNode.SelfNodeV4MasqAddrForThisPeer(); v != nil {
+return *v == addr
}
pfx := netip.PrefixFrom(addr, addr.BitLen())
return slices.Contains(h.selfNode.Addresses, pfx)
@@ -733,7 +733,7 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
<body>
<h1>Hello, %s (%v)</h1>
This is my Tailscale device. Your device is %v.
-`, html.EscapeString(who), h.remoteAddr.Addr(), html.EscapeString(h.peerNode.ComputedName))
+`, html.EscapeString(who), h.remoteAddr.Addr(), html.EscapeString(h.peerNode.ComputedName()))

if h.isSelf {
fmt.Fprintf(w, "<p>You are the owner of this node.\n")
@@ -1024,7 +1024,7 @@ func (f *incomingFile) PartialFile() ipn.PartialFile {

// canPutFile reports whether h can put a file ("Taildrop") to this node.
func (h *peerAPIHandler) canPutFile() bool {
-if h.peerNode.UnsignedPeerAPIOnly {
+if h.peerNode.UnsignedPeerAPIOnly() {
// Unsigned peers can't send files.
return false
}
@@ -1038,7 +1038,7 @@ func (h *peerAPIHandler) canDebug() bool {
// This node does not expose debug info.
return false
}
-if h.peerNode.UnsignedPeerAPIOnly {
+if h.peerNode.UnsignedPeerAPIOnly() {
// Unsigned peers can't debug.
return false
}
@@ -1047,7 +1047,7 @@ func (h *peerAPIHandler) canDebug() bool {

// canWakeOnLAN reports whether h can send a Wake-on-LAN packet from this node.
func (h *peerAPIHandler) canWakeOnLAN() bool {
-if h.peerNode.UnsignedPeerAPIOnly {
+if h.peerNode.UnsignedPeerAPIOnly() {
return false
}
return h.isSelf || h.peerHasCap(tailcfg.PeerCapabilityWakeOnLAN)
@@ -462,9 +462,9 @@ func TestHandlePeerAPI(t *testing.T) {
e.ph = &peerAPIHandler{
isSelf: tt.isSelf,
selfNode: selfNode,
-peerNode: &tailcfg.Node{
+peerNode: (&tailcfg.Node{
ComputedName: "some-peer-name",
-},
+}).View(),
ps: &peerAPIServer{
b: lb,
},
@@ -513,9 +513,9 @@ func TestFileDeleteRace(t *testing.T) {
}
ph := &peerAPIHandler{
isSelf: true,
-peerNode: &tailcfg.Node{
+peerNode: (&tailcfg.Node{
ComputedName: "some-peer-name",
-},
+}).View(),
selfNode: &tailcfg.Node{
Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")},
},
@@ -257,7 +257,7 @@ func (b *LocalBackend) ServeConfig() ipn.ServeConfigView {
return b.serveConfig
}

-func (b *LocalBackend) HandleIngressTCPConn(ingressPeer *tailcfg.Node, target ipn.HostPort, srcAddr netip.AddrPort, getConnOrReset func() (net.Conn, bool), sendRST func()) {
+func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target ipn.HostPort, srcAddr netip.AddrPort, getConnOrReset func() (net.Conn, bool), sendRST func()) {
b.mu.Lock()
sc := b.serveConfig
b.mu.Unlock()
@@ -201,16 +201,16 @@ func TestServeHTTPProxy(t *testing.T) {
},
},
}
-b.nodeByAddr = map[netip.Addr]*tailcfg.Node{
-netip.MustParseAddr("100.150.151.152"): {
+b.nodeByAddr = map[netip.Addr]tailcfg.NodeView{
+netip.MustParseAddr("100.150.151.152"): (&tailcfg.Node{
ComputedName: "some-peer",
User: tailcfg.UserID(1),
-},
-netip.MustParseAddr("100.150.151.153"): {
+}).View(),
+netip.MustParseAddr("100.150.151.153"): (&tailcfg.Node{
ComputedName: "some-tagged-peer",
Tags: []string{"tag:server", "tag:test"},
User: tailcfg.UserID(1),
-},
+}).View(),
}

// Start test serve endpoint.