ipn/ipnlocal: rename localNodeContext to nodeBackend
As just discussed on Slack with @nickkhyl.

Updates #12614
Change-Id: I138dd7eaffb274494297567375d969b4122f3f50
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>

Commit: 653c45585e (parent: 4fa9411e3f)
@@ -258,7 +258,7 @@ type LocalBackend struct {
 	// We intend to relax this in the future and only require holding b.mu when replacing it,
 	// but that requires a better (strictly ordered?) state machine and better management
 	// of [LocalBackend]'s own state that is not tied to the node context.
-	currentNodeAtomic atomic.Pointer[localNodeContext]
+	currentNodeAtomic atomic.Pointer[nodeBackend]

 	conf *conffile.Config // latest parsed config, or nil if not in declarative mode
 	pm   *profileManager  // mu guards access
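Note: the field comment above describes the intended discipline for this pointer: reads are lock-free via atomic.Pointer, while replacing the value is meant to happen under b.mu. A standalone sketch of that pattern (stub types, not the actual tailscale code):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type nodeState struct{ name string }

type backend struct {
	mu      sync.Mutex // guards replacement of current, among other state
	current atomic.Pointer[nodeState]
}

// currentNode is safe to call without holding mu: reads are lock-free.
func (b *backend) currentNode() *nodeState { return b.current.Load() }

// switchNode replaces the current node state. Per the comment in the diff,
// the goal is that only replacement (not reads) requires the mutex.
func (b *backend) switchNode(n *nodeState) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.current.Store(n)
}

func main() {
	b := &backend{}
	b.switchNode(&nodeState{name: "node-a"})
	fmt.Println(b.currentNode().name) // node-a
}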
@@ -519,7 +519,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 		captiveCancel:         nil, // so that we start checkCaptivePortalLoop when Running
 		needsCaptiveDetection: make(chan bool),
 	}
-	b.currentNodeAtomic.Store(newLocalNodeContext())
+	b.currentNodeAtomic.Store(newNodeBackend())
 	mConn.SetNetInfoCallback(b.setNetInfo)

 	if sys.InitialConfig != nil {
@@ -594,12 +594,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 func (b *LocalBackend) Clock() tstime.Clock { return b.clock }
 func (b *LocalBackend) Sys() *tsd.System    { return b.sys }

-func (b *LocalBackend) currentNode() *localNodeContext {
+func (b *LocalBackend) currentNode() *nodeBackend {
 	if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() {
 		return v
 	}
 	// Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor...
-	v := newLocalNodeContext()
+	v := newNodeBackend()
 	b.currentNodeAtomic.CompareAndSwap(nil, v)
 	return b.currentNodeAtomic.Load()
 }
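Note: currentNode's fallback is lock-free lazy initialization: CompareAndSwap(nil, v) installs the fresh value only if nothing else got there first, and the final Load returns whichever value won. (The real method only auto-inits under testenv.InTest(); outside tests it returns the stored pointer as-is.) A self-contained sketch of the pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct{ name string }

var current atomic.Pointer[node]

func currentNode() *node {
	if v := current.Load(); v != nil {
		return v
	}
	v := &node{name: "auto-init"}
	current.CompareAndSwap(nil, v) // no-op if another goroutine already stored
	return current.Load()          // returns whichever value won the race
}

func main() {
	fmt.Println(currentNode().name) // auto-init
}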
@@ -1466,7 +1466,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
 // AppendMatchingPeers returns base with all peers that match pred appended.
 //
 // It acquires b.mu to read the netmap but releases it before calling pred.
-func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
+func (b *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
 	var peers []tailcfg.NodeView

 	b.mu.Lock()
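Note: the doc comment calls out a deliberate choice: snapshot shared state under the lock, release it, then run the caller's predicate, so an arbitrary pred cannot block other lock holders or deadlock by calling back into the same struct. The pattern in isolation (toy types, not the real code):

package main

import (
	"fmt"
	"sync"
)

type book struct {
	mu    sync.Mutex
	names []string
}

func (b *book) AppendMatching(base []string, pred func(string) bool) []string {
	b.mu.Lock()
	snapshot := append([]string(nil), b.names...) // copy under lock
	b.mu.Unlock()
	for _, n := range snapshot { // pred runs lock-free
		if pred(n) {
			base = append(base, n)
		}
	}
	return base
}

func main() {
	b := &book{names: []string{"ana", "bob", "ann"}}
	out := b.AppendMatching(nil, func(s string) bool { return s[0] == 'a' })
	fmt.Println(out) // [ana ann]
}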
@@ -1495,13 +1495,13 @@ func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred fun

 // PeerCaps returns the capabilities that remote src IP has to
 // the current node.
-func (b *localNodeContext) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
+func (b *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	return b.peerCapsLocked(src)
 }

-func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
+func (b *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
 	if b.netMap == nil {
 		return nil
 	}
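Note: PeerCaps and peerCapsLocked follow the codebase's mu + *Locked naming convention: the exported method acquires the lock and delegates, while the Locked variant assumes the caller already holds it, so already-locked paths can reuse the logic without double-locking. A toy standalone version:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.Mutex
	n  int
}

// Value locks and delegates to the *Locked variant.
func (c *counter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.valueLocked()
}

// valueLocked requires c.mu to be held by the caller.
func (c *counter) valueLocked() int { return c.n }

func main() {
	c := &counter{n: 42}
	fmt.Println(c.Value()) // 42
}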
@@ -1523,7 +1523,7 @@ func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
 		return nil
 	}

-func (b *localNodeContext) GetFilterForTest() *filter.Filter {
+func (b *nodeBackend) GetFilterForTest() *filter.Filter {
 	return b.filterAtomic.Load()
 }

@@ -2034,7 +2034,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo
 	return true
 }

-func (c *localNodeContext) netMapWithPeers() *netmap.NetworkMap {
+func (c *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil {
@@ -2078,7 +2078,7 @@ func (b *LocalBackend) pickNewAutoExitNode() {
 	b.send(ipn.Notify{Prefs: &newPrefs})
 }

-func (c *localNodeContext) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
+func (c *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil || len(c.peers) == 0 {
@@ -2265,7 +2265,7 @@ func (b *LocalBackend) PeersForTest() []tailcfg.NodeView {
 	return b.currentNode().PeersForTest()
 }

-func (b *localNodeContext) PeersForTest() []tailcfg.NodeView {
+func (b *nodeBackend) PeersForTest() []tailcfg.NodeView {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	ret := slicesx.MapValues(b.peers)
@@ -2547,12 +2547,12 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{
 // b.mu must be held.
 func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) {
 	// TODO(nickkhyl): split this into two functions:
-	// - (*localNodeContext).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool),
+	// - (*nodeBackend).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool),
 	//   which would return packet filters for the current state and whether they changed since the last call.
 	// - (*LocalBackend).updateFilters(), which would use the above to update the engine with the new filters,
 	//   notify b.sshServer, etc.
 	//
-	// For this, we would need to plumb a few more things into the [localNodeContext]. Most importantly,
+	// For this, we would need to plumb a few more things into the [nodeBackend]. Most importantly,
 	// the current [ipn.PrefsView], but maybe also b.logf and b.health?
 	//
 	// NOTE(danderson): keep change detection as the first thing in
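Note: the TODO sketches a split that would make filter rebuilding a function of node state, with LocalBackend only applying the result. Hypothetical shape only; the method name and return values come from the comment, while the stub types below are invented for illustration:

package main

// Filter and nodeBackendSketch are stand-ins for the real types.
type Filter struct{}

type nodeBackendSketch struct{ lastHash string }

// RebuildFilters is the hypothetical method from the TODO: build packet
// filters from the node's current state and report whether they changed
// since the last call (e.g. by comparing a hash of the inputs).
func (c *nodeBackendSketch) RebuildFilters() (normal, jailed *Filter, changed bool) {
	newHash := "hash-of-netmap-and-prefs" // placeholder
	changed = newHash != c.lastHash
	c.lastHash = newHash
	return &Filter{}, &Filter{}, changed
}

func main() {
	var c nodeBackendSketch
	if normal, _, changed := c.RebuildFilters(); changed {
		_ = normal // (*LocalBackend).updateFilters would hand this to the engine
	}
}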
@@ -2838,7 +2838,7 @@ func (b *LocalBackend) setFilter(f *filter.Filter) {
 	b.e.SetFilter(f)
 }

-func (c *localNodeContext) setFilter(f *filter.Filter) {
+func (c *nodeBackend) setFilter(f *filter.Filter) {
 	c.filterAtomic.Store(f)
 }

@@ -3901,7 +3901,7 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt
 // in Hostinfo. When the user preferences currently request "shields up"
 // mode, all inbound connections are refused, so services are not reported.
 // Otherwise, shouldUploadServices respects NetMap.CollectServices.
-// TODO(nickkhyl): move this into [localNodeContext]?
+// TODO(nickkhyl): move this into [nodeBackend]?
 func (b *LocalBackend) shouldUploadServices() bool {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -4773,7 +4773,7 @@ func (b *LocalBackend) NetMap() *netmap.NetworkMap {
 	return b.currentNode().NetMap()
 }

-func (c *localNodeContext) NetMap() *netmap.NetworkMap {
+func (c *nodeBackend) NetMap() *netmap.NetworkMap {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return c.netMap
@@ -5018,7 +5018,7 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs
 	return false
 }

-func (c *localNodeContext) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
+func (c *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS)
@@ -6144,7 +6144,7 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre
 	return newPrefs
 }

-func (c *localNodeContext) SetNetMap(nm *netmap.NetworkMap) {
+func (c *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	c.netMap = nm
@@ -6224,7 +6224,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
 	b.driveNotifyCurrentSharesLocked()
 }

-func (b *localNodeContext) updateNodeByAddrLocked() {
+func (b *nodeBackend) updateNodeByAddrLocked() {
 	nm := b.netMap
 	if nm == nil {
 		b.nodeByAddr = nil
@@ -6260,7 +6260,7 @@ func (b *localNodeContext) updateNodeByAddrLocked() {
 	}
 }

-func (b *localNodeContext) updatePeersLocked() {
+func (b *nodeBackend) updatePeersLocked() {
 	nm := b.netMap
 	if nm == nil {
 		b.peers = nil
@@ -6667,13 +6667,13 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK

 // PeerHasCap reports whether the peer with the given Tailscale IP addresses
 // contains the given capability string, with any value(s).
-func (b *localNodeContext) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+func (b *nodeBackend) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	return b.peerHasCapLocked(addr, wantCap)
 }

-func (b *localNodeContext) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
+func (b *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
 	return b.peerCapsLocked(addr).HasCapability(wantCap)
 }

@@ -6737,13 +6737,13 @@ func peerAPIURL(ip netip.Addr, port uint16) string {
 	return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port))
 }

-func (c *localNodeContext) PeerHasPeerAPI(p tailcfg.NodeView) bool {
+func (c *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
 	return c.PeerAPIBase(p) != ""
 }

 // PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI,
 // or the empty string if the peer is invalid or doesn't support PeerAPI.
-func (c *localNodeContext) PeerAPIBase(p tailcfg.NodeView) string {
+func (c *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
 	c.mu.Lock()
 	nm := c.netMap
 	c.mu.Unlock()
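Note: peerAPIURL (the context line above) leans on netip.AddrPortFrom for correct host formatting; in particular, IPv6 addresses get the brackets a URL requires. A runnable check, where the addresses and port are arbitrary example values:

package main

import (
	"fmt"
	"net/netip"
)

// peerAPIURL as in the diff: format an "http://ip:port" base URL.
func peerAPIURL(ip netip.Addr, port uint16) string {
	return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port))
}

func main() {
	fmt.Println(peerAPIURL(netip.MustParseAddr("100.64.0.1"), 4280))        // http://100.64.0.1:4280
	fmt.Println(peerAPIURL(netip.MustParseAddr("fd7a:115c:a1e0::1"), 4280)) // http://[fd7a:115c:a1e0::1]:4280
}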
@@ -6987,7 +6987,7 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg
 	return "", false
 }

-func (c *localNodeContext) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
+func (c *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID)
@@ -7411,7 +7411,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err
 		// down, so no need to do any work.
 		return nil
 	}
-	b.currentNodeAtomic.Store(newLocalNodeContext())
+	b.currentNodeAtomic.Store(newNodeBackend())
 	b.setNetMapLocked(nil) // Reset netmap.
 	b.updateFilterLocked(ipn.PrefsView{})
 	// Reset the NetworkMap in the engine
@@ -8101,7 +8101,7 @@ func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) {
 // rules that require a source IP to have a certain node capability.
 //
 // TODO(bradfitz): optimize this later if/when it matters.
-// TODO(nickkhyl): move this into [localNodeContext] along with [LocalBackend.updateFilterLocked].
+// TODO(nickkhyl): move this into [nodeBackend] along with [LocalBackend.updateFilterLocked].
 func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCapability) bool {
 	if cap == "" {
 		// Shouldn't happen, but just in case.
@@ -18,29 +18,29 @@ import (
 	"tailscale.com/wgengine/filter"
 )

-// localNodeContext holds the [LocalBackend]'s context tied to a local node (usually the current one).
+// nodeBackend is node-specific [LocalBackend] state. It is usually the current node.
 //
 // Its exported methods are safe for concurrent use, but the struct is not a snapshot of state at a given moment;
 // its state can change between calls. For example, asking for the same value (e.g., netmap or prefs) twice
 // may return different results. Returned values are immutable and safe for concurrent use.
 //
-// If both the [LocalBackend]'s internal mutex and the [localNodeContext] mutex must be held at the same time,
+// If both the [LocalBackend]'s internal mutex and the [nodeBackend] mutex must be held at the same time,
 // the [LocalBackend] mutex must be acquired first. See the comment on the [LocalBackend] field for more details.
 //
-// Two pointers to different [localNodeContext] instances represent different local nodes.
-// However, there's currently a bug where a new [localNodeContext] might not be created
+// Two pointers to different [nodeBackend] instances represent different local nodes.
+// However, there's currently a bug where a new [nodeBackend] might not be created
 // during an implicit node switch (see tailscale/corp#28014).
 //
 // In the future, we might want to include at least the following in this struct (in addition to the current fields).
 // However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions,
 // peer API handlers, etc.).
-//   - [ipn.State]: when the LocalBackend switches to a different [localNodeContext], it can update the state of the old one.
+//   - [ipn.State]: when the LocalBackend switches to a different [nodeBackend], it can update the state of the old one.
 //   - [ipn.LoginProfileView] and [ipn.Prefs]: we should update them when the [profileManager] reports changes to them.
 //     In the future, [profileManager] (and the corresponding methods of the [LocalBackend]) can be made optional,
 //     and something else could be used to set them once or update them as needed.
 //   - [tailcfg.HostinfoView]: it includes certain fields that are tied to the current profile/node/prefs. We should also
 //     update it to be built once instead of mutated in twelvety different places.
-//   - [filter.Filter] (normal and jailed, along with the filterHash): the localNodeContext could have a method to (re-)build
+//   - [filter.Filter] (normal and jailed, along with the filterHash): the nodeBackend could have a method to (re-)build
 //     the filter for the current netmap/prefs (see [LocalBackend.updateFilterLocked]), and it needs to track the current
 //     filters and their hash.
 //   - Fields related to a requested or required (re-)auth: authURL, authURLTime, authActor, keyExpired, etc.
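Note: the lock-ordering rule in this doc comment (the LocalBackend mutex before the nodeBackend mutex) is what keeps two code paths that each need both locks from deadlocking against each other. A minimal illustration with stub types, not the real ones:

package main

import "sync"

type nodeBackendStub struct{ mu sync.Mutex }

type localBackendStub struct {
	mu   sync.Mutex
	node *nodeBackendStub
}

// doBoth respects the documented order: b.mu first, then node.mu.
// Any path acquiring them in the opposite order could deadlock against this one.
func (b *localBackendStub) doBoth() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.node.mu.Lock()
	defer b.node.mu.Unlock()
	// ...work that needs both locks...
}

func main() {
	b := &localBackendStub{node: &nodeBackendStub{}}
	b.doBoth()
}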
@@ -51,7 +51,7 @@ import (
 // It should not include any fields used by specific features that don't belong in [LocalBackend].
 // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature
 // into a separate package and have it install proper hooks.
-type localNodeContext struct {
+type nodeBackend struct {
 	// filterAtomic is a stateful packet filter. Immutable once created, but can be
 	// replaced with a new one.
 	filterAtomic atomic.Pointer[filter.Filter]
@@ -71,23 +71,23 @@ type localNodeContext struct {
 	// peers is the set of current peers and their current values after applying
 	// delta node mutations as they come in (with mu held). The map values can be
 	// given out to callers, but the map itself can be mutated in place (with mu held)
-	// and must not escape the [localNodeContext].
+	// and must not escape the [nodeBackend].
 	peers map[tailcfg.NodeID]tailcfg.NodeView

 	// nodeByAddr maps nodes' own addresses (excluding subnet routes) to node IDs.
-	// It is mutated in place (with mu held) and must not escape the [localNodeContext].
+	// It is mutated in place (with mu held) and must not escape the [nodeBackend].
 	nodeByAddr map[netip.Addr]tailcfg.NodeID
 }

-func newLocalNodeContext() *localNodeContext {
-	cn := &localNodeContext{}
+func newNodeBackend() *nodeBackend {
+	cn := &nodeBackend{}
 	// Default filter blocks everything and logs nothing.
 	noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{})
 	cn.filterAtomic.Store(noneFilter)
 	return cn
 }

-func (c *localNodeContext) Self() tailcfg.NodeView {
+func (c *nodeBackend) Self() tailcfg.NodeView {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil {
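Note: newNodeBackend's default is fail-closed: until the first netmap yields a real packet filter, the stored filter drops everything (filter.NewAllowNone) and logs nothing (logger.Discard). The same calls isolated into a snippet, with import paths as used in the diffed file (assuming the standard go4.org/netipx and tailscale.com module paths):

package main

import (
	"sync/atomic"

	"go4.org/netipx"
	"tailscale.com/types/logger"
	"tailscale.com/wgengine/filter"
)

func main() {
	var filterAtomic atomic.Pointer[filter.Filter]
	noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{})
	filterAtomic.Store(noneFilter)
	_ = filterAtomic.Load() // drops all traffic until a real filter replaces it
}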
@@ -96,7 +96,7 @@ func (c *localNodeContext) Self() tailcfg.NodeView {
 	return c.netMap.SelfNode
 }

-func (c *localNodeContext) SelfUserID() tailcfg.UserID {
+func (c *nodeBackend) SelfUserID() tailcfg.UserID {
 	self := c.Self()
 	if !self.Valid() {
 		return 0
@@ -105,13 +105,13 @@ func (c *localNodeContext) SelfUserID() tailcfg.UserID {
 }

 // SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap.
-func (c *localNodeContext) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
+func (c *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
 	return c.SelfHasCapOr(wantCap, false)
 }

-// SelfHasCapOr is like [localNodeContext.SelfHasCap], but returns the specified default value
+// SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value
 // if the netmap is not available yet.
-func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
+func (c *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil {
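Note: the "Or" variant exists because "no netmap yet" differs from "capability denied": each caller picks the default that is safe for its own feature rather than inheriting a blanket false. A toy standalone version of the pattern:

package main

import "fmt"

type caps struct {
	known bool            // whether a netmap has been seen yet
	set   map[string]bool // granted capabilities
}

// HasOr returns the capability bit, or def when no netmap has arrived yet.
func (c *caps) HasOr(wantCap string, def bool) bool {
	if !c.known {
		return def
	}
	return c.set[wantCap]
}

func main() {
	var c caps // zero value: no netmap yet
	fmt.Println(c.HasOr("feature-a", false)) // false: keep the feature off
	fmt.Println(c.HasOr("feature-b", true))  // true: default-on until told otherwise
}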
@@ -120,7 +120,7 @@ func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool
 	return c.netMap.AllCaps.Contains(wantCap)
 }

-func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile {
+func (c *nodeBackend) NetworkProfile() ipn.NetworkProfile {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return ipn.NetworkProfile{
@@ -131,7 +131,7 @@ func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile {
 }

 // TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]?
-func (c *localNodeContext) DERPMap() *tailcfg.DERPMap {
+func (c *nodeBackend) DERPMap() *tailcfg.DERPMap {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil {
@@ -140,14 +140,14 @@ func (c *localNodeContext) DERPMap() *tailcfg.DERPMap {
 	return c.netMap.DERPMap
 }

-func (c *localNodeContext) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
+func (c *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	nid, ok := c.nodeByAddr[ip]
 	return nid, ok
 }

-func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
+func (c *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.netMap == nil {
@@ -165,14 +165,14 @@ func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok boo
 	return 0, false
 }

-func (c *localNodeContext) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
+func (c *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	n, ok := c.peers[id]
 	return n, ok
 }

-func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
+func (c *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
 	c.mu.Lock()
 	nm := c.netMap
 	c.mu.Unlock()
@@ -184,7 +184,7 @@ func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileVie
 }

 // Peers returns all the current peers in an undefined order.
-func (c *localNodeContext) Peers() []tailcfg.NodeView {
+func (c *nodeBackend) Peers() []tailcfg.NodeView {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return slicesx.MapValues(c.peers)
@@ -195,13 +195,13 @@ func (c *localNodeContext) Peers() []tailcfg.NodeView {
 //
 // TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here,
 // but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter.
-// Something like (*localNodeContext).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps?
-func (c *localNodeContext) unlockedNodesPermitted(packetFilter []filter.Match) bool {
+// Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps?
+func (c *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	return packetFilterPermitsUnlockedNodes(c.peers, packetFilter)
 }

-func (c *localNodeContext) filter() *filter.Filter {
+func (c *nodeBackend) filter() *filter.Filter {
 	return c.filterAtomic.Load()
 }