ipn/ipnlocal: use "nb" consistently as receiver for nodeBackend

Cleanup after #15866. It was using a mix of "b" and "c" before. But "b"
is ambiguous with LocalBackend's usual "b".

Updates #12614

Change-Id: I8c2e84597555ec3db0d783a00ac1c12549ce6706
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Author:    Brad Fitzpatrick
Date:      2025-05-02 17:28:41 -07:00
Committer: Brad Fitzpatrick
Parent:    653c45585e
Commit:    32ce1bdb48

2 changed files with 136 additions and 136 deletions
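For context, a minimal sketch of the receiver-name ambiguity this commit removes. The types and fields below are hypothetical stand-ins, not the real ipn/ipnlocal definitions:

package sketch

// Stand-ins for the real ipn/ipnlocal types; fields elided.
type LocalBackend struct{ hostname string }
type nodeBackend struct{ hostname string }

// LocalBackend methods conventionally use "b" as their receiver.
func (b *LocalBackend) Hostname() string { return b.hostname }

// Before this commit, nodeBackend methods used a mix of "b" and "c",
// so a body line like "b.mu.Lock()" read as if it touched LocalBackend
// state. With "nb", each use site names its type unambiguously.
func (nb *nodeBackend) Hostname() string { return nb.hostname }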


@@ -1466,16 +1466,16 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
 // AppendMatchingPeers returns base with all peers that match pred appended.
 //
 // It acquires b.mu to read the netmap but releases it before calling pred.
-func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
+func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
 	var peers []tailcfg.NodeView
-	b.mu.Lock()
-	if b.netMap != nil {
+	nb.mu.Lock()
+	if nb.netMap != nil {
 		// All fields on b.netMap are immutable, so this is
 		// safe to copy and use outside the lock.
-		peers = b.netMap.Peers
+		peers = nb.netMap.Peers
 	}
-	b.mu.Unlock()
+	nb.mu.Unlock()

 	ret := base
 	for _, peer := range peers {
@@ -1483,9 +1483,9 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai
 		// UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID,
 		// and then look up the latest copy in b.peers which is updated in
 		// response to UpdateNetmapDelta edits.
-		b.mu.Lock()
-		peer, ok := b.peers[peer.ID()]
-		b.mu.Unlock()
+		nb.mu.Lock()
+		peer, ok := nb.peers[peer.ID()]
+		nb.mu.Unlock()
 		if ok && pred(peer) {
 			ret = append(ret, peer)
 		}
@@ -1495,21 +1495,21 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai
 // PeerCaps returns the capabilities that remote src IP has to
 // ths current node.
-func (b *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return b.peerCapsLocked(src)
+func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return nb.peerCapsLocked(src)
 }

-func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
-	if b.netMap == nil {
+func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
+	if nb.netMap == nil {
 		return nil
 	}
-	filt := b.filterAtomic.Load()
+	filt := nb.filterAtomic.Load()
 	if filt == nil {
 		return nil
 	}
-	addrs := b.netMap.GetAddresses()
+	addrs := nb.netMap.GetAddresses()
 	for i := range addrs.Len() {
 		a := addrs.At(i)
 		if !a.IsSingleIP() {
@@ -1523,8 +1523,8 @@ func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
 	return nil
 }

-func (b *nodeBackend) GetFilterForTest() *filter.Filter {
-	return b.filterAtomic.Load()
+func (nb *nodeBackend) GetFilterForTest() *filter.Filter {
+	return nb.filterAtomic.Load()
 }

 // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status.
@@ -2034,14 +2034,14 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo
 	return true
 }

-func (c *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil {
+func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil {
 		return nil
 	}
-	nm := ptr.To(*c.netMap) // shallow clone
-	nm.Peers = slicesx.MapValues(c.peers)
+	nm := ptr.To(*nb.netMap) // shallow clone
+	nm.Peers = slicesx.MapValues(nb.peers)
 	slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int {
 		return cmp.Compare(a.ID(), b.ID())
 	})
@@ -2078,10 +2078,10 @@ func (b *LocalBackend) pickNewAutoExitNode() {
 	b.send(ipn.Notify{Prefs: &newPrefs})
 }

-func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil || len(c.peers) == 0 {
+func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil || len(nb.peers) == 0 {
 		return false
 	}

@@ -2093,7 +2093,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo
 	for _, m := range muts {
 		n, ok := mutableNodes[m.NodeIDBeingMutated()]
 		if !ok {
-			nv, ok := c.peers[m.NodeIDBeingMutated()]
+			nv, ok := nb.peers[m.NodeIDBeingMutated()]
 			if !ok {
 				// TODO(bradfitz): unexpected metric?
 				return false
@@ -2104,7 +2104,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo
 		m.Apply(n)
 	}
 	for nid, n := range mutableNodes {
-		c.peers[nid] = n.View()
+		nb.peers[nid] = n.View()
 	}
 	return true
 }
@@ -2265,10 +2265,10 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView {
 	return b.currentNode().PeersForTest()
 }

-func (b *nodeBackend) PeersForTest() []tailcfg.NodeView {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	ret := slicesx.MapValues(b.peers)
+func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	ret := slicesx.MapValues(nb.peers)
 	slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
 		return cmp.Compare(a.ID(), b.ID())
 	})
@@ -2838,8 +2838,8 @@ func (b *LocalBackend) setFilter(f *filter.Filter) {
 	b.e.SetFilter(f)
 }

-func (c *nodeBackend) setFilter(f *filter.Filter) {
-	c.filterAtomic.Store(f)
+func (nb *nodeBackend) setFilter(f *filter.Filter) {
+	nb.filterAtomic.Store(f)
 }

 var removeFromDefaultRoute = []netip.Prefix{
@@ -4773,10 +4773,10 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap {
 	return b.currentNode().NetMap()
 }

-func (c *nodeBackend) NetMap() *netmap.NetworkMap {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return c.netMap
+func (nb *nodeBackend) NetMap() *netmap.NetworkMap {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return nb.netMap
 }

 func (b *LocalBackend) isEngineBlocked() bool {
@@ -5018,10 +5018,10 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs
 	return false
 }

-func (c *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS)
+func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS)
 }

 // dnsConfigForNetmap returns a *dns.Config for the given netmap,
@@ -6144,12 +6144,12 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre
 	return newPrefs
 }

-func (c *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.netMap = nm
-	c.updateNodeByAddrLocked()
-	c.updatePeersLocked()
+func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	nb.netMap = nm
+	nb.updateNodeByAddrLocked()
+	nb.updatePeersLocked()
 }

 // setNetMapLocked updates the LocalBackend state to reflect the newly
@@ -6224,25 +6224,25 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
 	b.driveNotifyCurrentSharesLocked()
 }

-func (b *nodeBackend) updateNodeByAddrLocked() {
-	nm := b.netMap
+func (nb *nodeBackend) updateNodeByAddrLocked() {
+	nm := nb.netMap
 	if nm == nil {
-		b.nodeByAddr = nil
+		nb.nodeByAddr = nil
 		return
 	}

 	// Update the nodeByAddr index.
-	if b.nodeByAddr == nil {
-		b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{}
+	if nb.nodeByAddr == nil {
+		nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{}
 	}
 	// First pass, mark everything unwanted.
-	for k := range b.nodeByAddr {
-		b.nodeByAddr[k] = 0
+	for k := range nb.nodeByAddr {
+		nb.nodeByAddr[k] = 0
 	}
 	addNode := func(n tailcfg.NodeView) {
 		for _, ipp := range n.Addresses().All() {
 			if ipp.IsSingleIP() {
-				b.nodeByAddr[ipp.Addr()] = n.ID()
+				nb.nodeByAddr[ipp.Addr()] = n.ID()
 			}
 		}
 	}
@@ -6253,34 +6253,34 @@ func (b *nodeBackend) updateNodeByAddrLocked() {
 		addNode(p)
 	}
 	// Third pass, actually delete the unwanted items.
-	for k, v := range b.nodeByAddr {
+	for k, v := range nb.nodeByAddr {
 		if v == 0 {
-			delete(b.nodeByAddr, k)
+			delete(nb.nodeByAddr, k)
 		}
 	}
 }

-func (b *nodeBackend) updatePeersLocked() {
-	nm := b.netMap
+func (nb *nodeBackend) updatePeersLocked() {
+	nm := nb.netMap
 	if nm == nil {
-		b.peers = nil
+		nb.peers = nil
 		return
 	}

 	// First pass, mark everything unwanted.
-	for k := range b.peers {
-		b.peers[k] = tailcfg.NodeView{}
+	for k := range nb.peers {
+		nb.peers[k] = tailcfg.NodeView{}
 	}

 	// Second pass, add everything wanted.
 	for _, p := range nm.Peers {
-		mak.Set(&b.peers, p.ID(), p)
+		mak.Set(&nb.peers, p.ID(), p)
 	}

 	// Third pass, remove deleted things.
-	for k, v := range b.peers {
+	for k, v := range nb.peers {
 		if !v.Valid() {
-			delete(b.peers, k)
+			delete(nb.peers, k)
 		}
 	}
 }
@@ -6667,14 +6667,14 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK
 // PeerHasCap reports whether the peer with the given Tailscale IP addresses
 // contains the given capability string, with any value(s).
-func (b *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return b.peerHasCapLocked(addr, wantCap)
+func (nb *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return nb.peerHasCapLocked(addr, wantCap)
 }

-func (b *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
-	return b.peerCapsLocked(addr).HasCapability(wantCap)
+func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+	return nb.peerCapsLocked(addr).HasCapability(wantCap)
 }

 // SetDNS adds a DNS record for the given domain name & TXT record
@@ -6737,16 +6737,16 @@ func peerAPIURL(ip netip.Addr, port uint16) string {
 	return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port))
 }

-func (c *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
-	return c.PeerAPIBase(p) != ""
+func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
+	return nb.PeerAPIBase(p) != ""
 }

 // PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI,
 // or the empty string if the peer is invalid or doesn't support PeerAPI.
-func (c *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
-	c.mu.Lock()
-	nm := c.netMap
-	c.mu.Unlock()
+func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
+	nb.mu.Lock()
+	nm := nb.netMap
+	nb.mu.Unlock()
 	return peerAPIBase(nm, p)
 }

@@ -6987,10 +6987,10 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg
 	return "", false
 }

-func (c *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID)
+func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID)
 }

 // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a


@@ -87,17 +87,17 @@ func newNodeBackend() *nodeBackend {
 	return cn
 }

-func (c *nodeBackend) Self() tailcfg.NodeView {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil {
+func (nb *nodeBackend) Self() tailcfg.NodeView {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil {
 		return tailcfg.NodeView{}
 	}
-	return c.netMap.SelfNode
+	return nb.netMap.SelfNode
 }

-func (c *nodeBackend) SelfUserID() tailcfg.UserID {
-	self := c.Self()
+func (nb *nodeBackend) SelfUserID() tailcfg.UserID {
+	self := nb.Self()
 	if !self.Valid() {
 		return 0
 	}
@@ -105,59 +105,59 @@ func (c *nodeBackend) SelfUserID() tailcfg.UserID {
 }

 // SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap.
-func (c *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
-	return c.SelfHasCapOr(wantCap, false)
+func (nb *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
+	return nb.SelfHasCapOr(wantCap, false)
 }

 // SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value
 // if the netmap is not available yet.
-func (c *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil {
+func (nb *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil {
 		return def
 	}
-	return c.netMap.AllCaps.Contains(wantCap)
+	return nb.netMap.AllCaps.Contains(wantCap)
 }

-func (c *nodeBackend) NetworkProfile() ipn.NetworkProfile {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
 	return ipn.NetworkProfile{
 		// These are ok to call with nil netMap.
-		MagicDNSName: c.netMap.MagicDNSSuffix(),
-		DomainName:   c.netMap.DomainName(),
+		MagicDNSName: nb.netMap.MagicDNSSuffix(),
+		DomainName:   nb.netMap.DomainName(),
 	}
 }

 // TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]?
-func (c *nodeBackend) DERPMap() *tailcfg.DERPMap {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil {
+func (nb *nodeBackend) DERPMap() *tailcfg.DERPMap {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil {
 		return nil
 	}
-	return c.netMap.DERPMap
+	return nb.netMap.DERPMap
 }

-func (c *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	nid, ok := c.nodeByAddr[ip]
+func (nb *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	nid, ok := nb.nodeByAddr[ip]
 	return nid, ok
 }

-func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.netMap == nil {
+func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.netMap == nil {
 		return 0, false
 	}
-	if self := c.netMap.SelfNode; self.Valid() && self.Key() == k {
+	if self := nb.netMap.SelfNode; self.Valid() && self.Key() == k {
 		return self.ID(), true
 	}
 	// TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers.
-	for _, n := range c.peers {
+	for _, n := range nb.peers {
 		if n.Key() == k {
 			return n.ID(), true
 		}
@@ -165,17 +165,17 @@ func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
 	return 0, false
 }

-func (c *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	n, ok := c.peers[id]
+func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	n, ok := nb.peers[id]
 	return n, ok
 }

-func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
-	c.mu.Lock()
-	nm := c.netMap
-	c.mu.Unlock()
+func (nb *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
+	nb.mu.Lock()
+	nm := nb.netMap
+	nb.mu.Unlock()
 	if nm == nil {
 		return tailcfg.UserProfileView{}, false
 	}
@@ -184,10 +184,10 @@ func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok
 }

 // Peers returns all the current peers in an undefined order.
-func (c *nodeBackend) Peers() []tailcfg.NodeView {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return slicesx.MapValues(c.peers)
+func (nb *nodeBackend) Peers() []tailcfg.NodeView {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return slicesx.MapValues(nb.peers)
 }

 // unlockedNodesPermitted reports whether any peer with the UnsignedPeerAPIOnly bool set true has any of its allowed IPs
@@ -196,12 +196,12 @@ func (c *nodeBackend) Peers() []tailcfg.NodeView {
 // TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here,
 // but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter.
 // Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps?
-func (c *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return packetFilterPermitsUnlockedNodes(c.peers, packetFilter)
+func (nb *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	return packetFilterPermitsUnlockedNodes(nb.peers, packetFilter)
 }

-func (c *nodeBackend) filter() *filter.Filter {
-	return c.filterAtomic.Load()
+func (nb *nodeBackend) filter() *filter.Filter {
+	return nb.filterAtomic.Load()
 }
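
A side note on the setFilter/filter pair touched above: the packet filter lives in an atomic pointer, so readers can fetch the current filter without taking nb.mu. A minimal sketch of that pattern, with a hypothetical simplified Filter type standing in for *filter.Filter:

package sketch

import "sync/atomic"

// Filter stands in for *filter.Filter in the real code.
type Filter struct{ rules []string }

type backend struct {
	// filterAtomic holds the current packet filter; Load and Store
	// are atomic, so hot-path readers never contend on a mutex.
	filterAtomic atomic.Pointer[Filter]
}

func (nb *backend) setFilter(f *Filter) { nb.filterAtomic.Store(f) }
func (nb *backend) filter() *Filter     { return nb.filterAtomic.Load() }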