ipn/ipnlocal: use "nb" consistently as receiver for nodeBackend

Cleanup after #15866: the nodeBackend methods previously used a mix of "b"
and "c" as receiver names, and "b" is ambiguous with LocalBackend's
conventional "b" receiver.

Updates #12614

Change-Id: I8c2e84597555ec3db0d783a00ac1c12549ce6706
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Authored by Brad Fitzpatrick on 2025-05-02 17:28:41 -07:00; committed by Brad Fitzpatrick.
parent 653c45585e
commit 32ce1bdb48
2 changed files with 136 additions and 136 deletions
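
For context, a minimal, self-contained sketch (toy types and hypothetical names, not code from this commit) of the convention the rename settles on: "b" always denotes the LocalBackend receiver and "nb" always denotes a nodeBackend, so the two can't be misread for one another:

package main

import "fmt"

// Toy stand-ins for the real types (illustration only).
type nodeBackend struct{ name string }

type LocalBackend struct{ node *nodeBackend }

// LocalBackend methods conventionally use "b" as the receiver.
func (b *LocalBackend) currentNode() *nodeBackend { return b.node }

// nodeBackend methods now consistently use "nb", so a nodeBackend
// local inside a LocalBackend method can't be confused with, or
// shadow, the "b" receiver.
func (nb *nodeBackend) describe() string { return nb.name }

func main() {
    b := &LocalBackend{node: &nodeBackend{name: "node0"}}
    nb := b.currentNode() // "b" is the LocalBackend, "nb" the nodeBackend
    fmt.Println(nb.describe())
}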

@@ -1466,16 +1466,16 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
 // AppendMatchingPeers returns base with all peers that match pred appended.
 //
 // It acquires b.mu to read the netmap but releases it before calling pred.
-func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
+func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
     var peers []tailcfg.NodeView
-    b.mu.Lock()
-    if b.netMap != nil {
+    nb.mu.Lock()
+    if nb.netMap != nil {
         // All fields on b.netMap are immutable, so this is
         // safe to copy and use outside the lock.
-        peers = b.netMap.Peers
+        peers = nb.netMap.Peers
     }
-    b.mu.Unlock()
+    nb.mu.Unlock()

     ret := base
     for _, peer := range peers {
@@ -1483,9 +1483,9 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai
         // UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID,
         // and then look up the latest copy in b.peers which is updated in
         // response to UpdateNetmapDelta edits.
-        b.mu.Lock()
-        peer, ok := b.peers[peer.ID()]
-        b.mu.Unlock()
+        nb.mu.Lock()
+        peer, ok := nb.peers[peer.ID()]
+        nb.mu.Unlock()
         if ok && pred(peer) {
             ret = append(ret, peer)
         }
@@ -1495,21 +1495,21 @@ func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tai
 // PeerCaps returns the capabilities that remote src IP has to
 // ths current node.
-func (b *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    return b.peerCapsLocked(src)
+func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return nb.peerCapsLocked(src)
 }

-func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
-    if b.netMap == nil {
+func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
+    if nb.netMap == nil {
         return nil
     }
-    filt := b.filterAtomic.Load()
+    filt := nb.filterAtomic.Load()
     if filt == nil {
         return nil
     }
-    addrs := b.netMap.GetAddresses()
+    addrs := nb.netMap.GetAddresses()
     for i := range addrs.Len() {
         a := addrs.At(i)
         if !a.IsSingleIP() {
@@ -1523,8 +1523,8 @@ func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
     return nil
 }

-func (b *nodeBackend) GetFilterForTest() *filter.Filter {
-    return b.filterAtomic.Load()
+func (nb *nodeBackend) GetFilterForTest() *filter.Filter {
+    return nb.filterAtomic.Load()
 }

 // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status.
@@ -2034,14 +2034,14 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo
     return true
 }

-func (c *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil {
+func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil {
         return nil
     }
-    nm := ptr.To(*c.netMap) // shallow clone
-    nm.Peers = slicesx.MapValues(c.peers)
+    nm := ptr.To(*nb.netMap) // shallow clone
+    nm.Peers = slicesx.MapValues(nb.peers)
     slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int {
         return cmp.Compare(a.ID(), b.ID())
     })
@@ -2078,10 +2078,10 @@ func (b *LocalBackend) pickNewAutoExitNode() {
     b.send(ipn.Notify{Prefs: &newPrefs})
 }

-func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil || len(c.peers) == 0 {
+func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil || len(nb.peers) == 0 {
         return false
     }
@@ -2093,7 +2093,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo
     for _, m := range muts {
         n, ok := mutableNodes[m.NodeIDBeingMutated()]
         if !ok {
-            nv, ok := c.peers[m.NodeIDBeingMutated()]
+            nv, ok := nb.peers[m.NodeIDBeingMutated()]
             if !ok {
                 // TODO(bradfitz): unexpected metric?
                 return false
@@ -2104,7 +2104,7 @@ func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled boo
         m.Apply(n)
     }
     for nid, n := range mutableNodes {
-        c.peers[nid] = n.View()
+        nb.peers[nid] = n.View()
     }
     return true
 }
@@ -2265,10 +2265,10 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView {
     return b.currentNode().PeersForTest()
 }

-func (b *nodeBackend) PeersForTest() []tailcfg.NodeView {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    ret := slicesx.MapValues(b.peers)
+func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    ret := slicesx.MapValues(nb.peers)
     slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
         return cmp.Compare(a.ID(), b.ID())
     })
@@ -2838,8 +2838,8 @@ func (b *LocalBackend) setFilter(f *filter.Filter) {
     b.e.SetFilter(f)
 }

-func (c *nodeBackend) setFilter(f *filter.Filter) {
-    c.filterAtomic.Store(f)
+func (nb *nodeBackend) setFilter(f *filter.Filter) {
+    nb.filterAtomic.Store(f)
 }

 var removeFromDefaultRoute = []netip.Prefix{
@@ -4773,10 +4773,10 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap {
     return b.currentNode().NetMap()
 }

-func (c *nodeBackend) NetMap() *netmap.NetworkMap {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    return c.netMap
+func (nb *nodeBackend) NetMap() *netmap.NetworkMap {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return nb.netMap
 }

 func (b *LocalBackend) isEngineBlocked() bool {
@@ -5018,10 +5018,10 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs
     return false
 }

-func (c *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS)
+func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS)
 }

 // dnsConfigForNetmap returns a *dns.Config for the given netmap,
@@ -6144,12 +6144,12 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre
     return newPrefs
 }

-func (c *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    c.netMap = nm
-    c.updateNodeByAddrLocked()
-    c.updatePeersLocked()
+func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    nb.netMap = nm
+    nb.updateNodeByAddrLocked()
+    nb.updatePeersLocked()
 }

 // setNetMapLocked updates the LocalBackend state to reflect the newly
@@ -6224,25 +6224,25 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
     b.driveNotifyCurrentSharesLocked()
 }

-func (b *nodeBackend) updateNodeByAddrLocked() {
-    nm := b.netMap
+func (nb *nodeBackend) updateNodeByAddrLocked() {
+    nm := nb.netMap
     if nm == nil {
-        b.nodeByAddr = nil
+        nb.nodeByAddr = nil
         return
     }

     // Update the nodeByAddr index.
-    if b.nodeByAddr == nil {
-        b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{}
+    if nb.nodeByAddr == nil {
+        nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{}
     }
     // First pass, mark everything unwanted.
-    for k := range b.nodeByAddr {
-        b.nodeByAddr[k] = 0
+    for k := range nb.nodeByAddr {
+        nb.nodeByAddr[k] = 0
     }
     addNode := func(n tailcfg.NodeView) {
         for _, ipp := range n.Addresses().All() {
             if ipp.IsSingleIP() {
-                b.nodeByAddr[ipp.Addr()] = n.ID()
+                nb.nodeByAddr[ipp.Addr()] = n.ID()
             }
         }
     }
@@ -6253,34 +6253,34 @@ func (b *nodeBackend) updateNodeByAddrLocked() {
         addNode(p)
     }
     // Third pass, actually delete the unwanted items.
-    for k, v := range b.nodeByAddr {
+    for k, v := range nb.nodeByAddr {
         if v == 0 {
-            delete(b.nodeByAddr, k)
+            delete(nb.nodeByAddr, k)
         }
     }
 }

-func (b *nodeBackend) updatePeersLocked() {
-    nm := b.netMap
+func (nb *nodeBackend) updatePeersLocked() {
+    nm := nb.netMap
     if nm == nil {
-        b.peers = nil
+        nb.peers = nil
         return
     }

     // First pass, mark everything unwanted.
-    for k := range b.peers {
-        b.peers[k] = tailcfg.NodeView{}
+    for k := range nb.peers {
+        nb.peers[k] = tailcfg.NodeView{}
     }

     // Second pass, add everything wanted.
     for _, p := range nm.Peers {
-        mak.Set(&b.peers, p.ID(), p)
+        mak.Set(&nb.peers, p.ID(), p)
     }

     // Third pass, remove deleted things.
-    for k, v := range b.peers {
+    for k, v := range nb.peers {
         if !v.Valid() {
-            delete(b.peers, k)
+            delete(nb.peers, k)
         }
     }
 }
@@ -6667,14 +6667,14 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK
 // PeerHasCap reports whether the peer with the given Tailscale IP addresses
 // contains the given capability string, with any value(s).
-func (b *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    return b.peerHasCapLocked(addr, wantCap)
+func (nb *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return nb.peerHasCapLocked(addr, wantCap)
 }

-func (b *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
-    return b.peerCapsLocked(addr).HasCapability(wantCap)
+func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+    return nb.peerCapsLocked(addr).HasCapability(wantCap)
 }

 // SetDNS adds a DNS record for the given domain name & TXT record
@@ -6737,16 +6737,16 @@ func peerAPIURL(ip netip.Addr, port uint16) string {
     return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port))
 }

-func (c *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
-    return c.PeerAPIBase(p) != ""
+func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
+    return nb.PeerAPIBase(p) != ""
 }

 // PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI,
 // or the empty string if the peer is invalid or doesn't support PeerAPI.
-func (c *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
-    c.mu.Lock()
-    nm := c.netMap
-    c.mu.Unlock()
+func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
+    nb.mu.Lock()
+    nm := nb.netMap
+    nb.mu.Unlock()
     return peerAPIBase(nm, p)
 }
@@ -6987,10 +6987,10 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg
     return "", false
 }

-func (c *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID)
+func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID)
 }

 // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a

@@ -87,17 +87,17 @@ func newNodeBackend() *nodeBackend {
     return cn
 }

-func (c *nodeBackend) Self() tailcfg.NodeView {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil {
+func (nb *nodeBackend) Self() tailcfg.NodeView {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil {
         return tailcfg.NodeView{}
     }
-    return c.netMap.SelfNode
+    return nb.netMap.SelfNode
 }

-func (c *nodeBackend) SelfUserID() tailcfg.UserID {
-    self := c.Self()
+func (nb *nodeBackend) SelfUserID() tailcfg.UserID {
+    self := nb.Self()
     if !self.Valid() {
         return 0
     }
@@ -105,59 +105,59 @@ func (c *nodeBackend) SelfUserID() tailcfg.UserID {
 }

 // SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap.
-func (c *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
-    return c.SelfHasCapOr(wantCap, false)
+func (nb *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
+    return nb.SelfHasCapOr(wantCap, false)
 }

 // SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value
 // if the netmap is not available yet.
-func (c *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil {
+func (nb *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil {
         return def
     }
-    return c.netMap.AllCaps.Contains(wantCap)
+    return nb.netMap.AllCaps.Contains(wantCap)
 }

-func (c *nodeBackend) NetworkProfile() ipn.NetworkProfile {
-    c.mu.Lock()
-    defer c.mu.Unlock()
+func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
     return ipn.NetworkProfile{
         // These are ok to call with nil netMap.
-        MagicDNSName: c.netMap.MagicDNSSuffix(),
-        DomainName:   c.netMap.DomainName(),
+        MagicDNSName: nb.netMap.MagicDNSSuffix(),
+        DomainName:   nb.netMap.DomainName(),
     }
 }

 // TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]?
-func (c *nodeBackend) DERPMap() *tailcfg.DERPMap {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil {
+func (nb *nodeBackend) DERPMap() *tailcfg.DERPMap {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil {
         return nil
     }
-    return c.netMap.DERPMap
+    return nb.netMap.DERPMap
 }

-func (c *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    nid, ok := c.nodeByAddr[ip]
+func (nb *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    nid, ok := nb.nodeByAddr[ip]
     return nid, ok
 }

-func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    if c.netMap == nil {
+func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    if nb.netMap == nil {
         return 0, false
     }
-    if self := c.netMap.SelfNode; self.Valid() && self.Key() == k {
+    if self := nb.netMap.SelfNode; self.Valid() && self.Key() == k {
         return self.ID(), true
     }
     // TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers.
-    for _, n := range c.peers {
+    for _, n := range nb.peers {
         if n.Key() == k {
             return n.ID(), true
         }
@@ -165,17 +165,17 @@ func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
     return 0, false
 }

-func (c *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    n, ok := c.peers[id]
+func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    n, ok := nb.peers[id]
     return n, ok
 }

-func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
-    c.mu.Lock()
-    nm := c.netMap
-    c.mu.Unlock()
+func (nb *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
+    nb.mu.Lock()
+    nm := nb.netMap
+    nb.mu.Unlock()
     if nm == nil {
         return tailcfg.UserProfileView{}, false
     }
@@ -184,10 +184,10 @@ func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok
 }

 // Peers returns all the current peers in an undefined order.
-func (c *nodeBackend) Peers() []tailcfg.NodeView {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    return slicesx.MapValues(c.peers)
+func (nb *nodeBackend) Peers() []tailcfg.NodeView {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return slicesx.MapValues(nb.peers)
 }

 // unlockedNodesPermitted reports whether any peer with the UnsignedPeerAPIOnly bool set true has any of its allowed IPs
@@ -196,12 +196,12 @@ func (c *nodeBackend) Peers() []tailcfg.NodeView {
 // TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here,
 // but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter.
 // Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps?
-func (c *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
-    c.mu.Lock()
-    defer c.mu.Unlock()
-    return packetFilterPermitsUnlockedNodes(c.peers, packetFilter)
+func (nb *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
+    nb.mu.Lock()
+    defer nb.mu.Unlock()
+    return packetFilterPermitsUnlockedNodes(nb.peers, packetFilter)
 }

-func (c *nodeBackend) filter() *filter.Filter {
-    return c.filterAtomic.Load()
+func (nb *nodeBackend) filter() *filter.Filter {
+    return nb.filterAtomic.Load()
 }