diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 19d8e8b86..c0f5b25f3 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -382,14 +382,14 @@ func TestAllowExitNodeDNSProxyToServeName(t *testing.T) { t.Fatal("unexpected true on backend with nil NetMap") } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ DNS: tailcfg.DNSConfig{ ExitNodeFilteredSet: []string{ ".ts.net", "some.exact.bad", }, }, - } + }) tests := []struct { name string want bool diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 8ae813ff2..f13c9de48 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -4,7 +4,6 @@ package ipnlocal import ( - "cmp" "fmt" "os" "slices" @@ -26,26 +25,14 @@ const ( // enabled. This is currently based on checking for the drive:share node // attribute. func (b *LocalBackend) DriveSharingEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.driveSharingEnabledLocked() -} - -func (b *LocalBackend) driveSharingEnabledLocked() bool { - return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveShare) + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) } // DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes // is enabled. This is currently based on checking for the drive:access node // attribute. func (b *LocalBackend) DriveAccessEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.driveAccessEnabledLocked() -} - -func (b *LocalBackend) driveAccessEnabledLocked() bool { - return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveAccess) + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -266,7 +253,7 @@ func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, dr // shares has changed since the last notification. func (b *LocalBackend) driveNotifyCurrentSharesLocked() { var shares views.SliceView[*drive.Share, drive.ShareView] - if b.driveSharingEnabledLocked() { + if b.DriveSharingEnabled() { // Only populate shares if sharing is enabled. shares = b.pm.prefs.DriveShares() } @@ -310,12 +297,12 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) { } var driveRemotes []*drive.Remote - if b.driveAccessEnabledLocked() { + if b.DriveAccessEnabled() { // Only populate peers if access is enabled, otherwise leave blank. driveRemotes = b.driveRemotesFromPeers(nm) } - fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport()) + fs.SetRemotes(nm.Domain, driveRemotes, b.newDriveTransport()) } func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote { @@ -330,23 +317,20 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // Peers are available to Taildrive if: // - They are online // - They are allowed to share at least one folder with us - b.mu.Lock() - latestNetMap := b.netMap - b.mu.Unlock() - - idx, found := slices.BinarySearchFunc(latestNetMap.Peers, peerID, func(candidate tailcfg.NodeView, id tailcfg.NodeID) int { - return cmp.Compare(candidate.ID(), id) - }) - if !found { + cn := b.currentNode() + peer, ok := cn.PeerByID(peerID) + if !ok { return false } - peer := latestNetMap.Peers[idx] - // Exclude offline peers. // TODO(oxtoacart): for some reason, this correctly // catches when a node goes from offline to online, // but not the other way around... 
+ // TODO(oxtoacart,nickkhyl): the reason was probably + // that we were using netmap.Peers instead of b.peers. + // The netmap.Peers slice is not updated in all cases. + // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { return false } @@ -354,8 +338,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // Check that the peer is allowed to share with us. addresses := peer.Addresses() for _, p := range addresses.All() { - capsMap := b.PeerCaps(p.Addr()) - if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) { + if cn.PeerHasCap(p.Addr(), tailcfg.PeerCapabilityTaildriveSharer) { return true } } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d60f05b11..308d03197 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -199,15 +199,15 @@ type LocalBackend struct { portpollOnce sync.Once // guards starting readPoller varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil - sshAtomicBool atomic.Bool + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeContext + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. - webClientAtomicBool atomic.Bool + webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext // exposeRemoteWebClientAtomicBool controls whether the web client is exposed over // Tailscale on port 5252. - exposeRemoteWebClientAtomicBool atomic.Bool - shutdownCalled bool // if Shutdown has been called + exposeRemoteWebClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeContext + shutdownCalled bool // if Shutdown has been called debugSink packet.CaptureSink sockstatLogger *sockstatlog.Logger @@ -227,11 +227,10 @@ type LocalBackend struct { // is never called. getTCPHandlerForFunnelFlow func(srcAddr netip.AddrPort, dstPort uint16) (handler func(net.Conn)) - filterAtomic atomic.Pointer[filter.Filter] - containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] - shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] - shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] - numClientStatusCalls atomic.Uint32 + containsViaIPFuncAtomic syncs.AtomicValue[func(netip.Addr) bool] // TODO(nickkhyl): move to nodeContext + shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool] // TODO(nickkhyl): move to nodeContext + shouldInterceptVIPServicesTCPPortAtomic syncs.AtomicValue[func(netip.AddrPort) bool] // TODO(nickkhyl): move to nodeContext + numClientStatusCalls atomic.Uint32 // TODO(nickkhyl): move to nodeContext // goTracker accounts for all goroutines started by LocalBacked, primarily // for testing and graceful shutdown purposes. @@ -245,46 +244,49 @@ type LocalBackend struct { extHost *ExtensionHost // The mutex protects the following elements. - mu sync.Mutex - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum + mu sync.Mutex + + // currentNodeAtomic is the current node context. It is always non-nil. + // It must be re-created when [LocalBackend] switches to a different profile/node + // (see tailscale/corp#28014 for a bug), but can be mutated in place (via its methods) + // while [LocalBackend] represents the same node. 
+ // + // It is safe for reading with or without holding b.mu, but mutating it in place + // or creating a new one must be done with b.mu held. If both mutexes must be held, + // the LocalBackend's mutex must be acquired first before acquiring the nodeContext's mutex. + // + // We intend to relax this in the future and only require holding b.mu when replacing it, + // but that requires a better (strictly ordered?) state machine and better management + // of [LocalBackend]'s own state that is not tied to the node context. + currentNodeAtomic atomic.Pointer[localNodeContext] + + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + filterHash deephash.Sum // TODO(nickkhyl): move to nodeContext httpTestClient *http.Client // for controlclient. nil by default, used by tests. ccGen clientGen // function for producing controlclient; lazily populated sshServer SSHServer // or nil, initialized lazily. appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. notifyCancel context.CancelFunc - cc controlclient.Client - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto + cc controlclient.Client // TODO(nickkhyl): move to nodeContext + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeContext machinePrivKey key.MachinePrivate - tka *tkaState - state ipn.State - capFileSharing bool // whether netMap contains the file sharing capability - capTailnetLock bool // whether netMap contains the tailnet lock capability + tka *tkaState // TODO(nickkhyl): move to nodeContext + state ipn.State // TODO(nickkhyl): move to nodeContext + capFileSharing bool // whether netMap contains the file sharing capability + capTailnetLock bool // whether netMap contains the tailnet lock capability // hostinfo is mutated in-place while mu is held. - hostinfo *tailcfg.Hostinfo - // netMap is the most recently set full netmap from the controlclient. - // It can't be mutated in place once set. Because it can't be mutated in place, - // delta updates from the control server don't apply to it. Instead, use - // the peers map to get up-to-date information on the state of peers. - // In general, avoid using the netMap.Peers slice. We'd like it to go away - // as of 2023-09-17. - netMap *netmap.NetworkMap - // peers is the set of current peers and their current values after applying - // delta node mutations as they come in (with mu held). The map values can - // be given out to callers, but the map itself must not escape the LocalBackend. - peers map[tailcfg.NodeID]tailcfg.NodeView - nodeByAddr map[netip.Addr]tailcfg.NodeID // by Node.Addresses only (not subnet routes) - nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil - activeLogin string // last logged LoginName from netMap + hostinfo *tailcfg.Hostinfo // TODO(nickkhyl): move to nodeContext + nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil; TODO(nickkhyl): move to nodeContext + activeLogin string // last logged LoginName from netMap; TODO(nickkhyl): move to nodeContext (or remove? it's in [ipn.LoginProfile]). 
engineStatus ipn.EngineStatus endpoints []tailcfg.Endpoint blocked bool - keyExpired bool - authURL string // non-empty if not Running - authURLTime time.Time // when the authURL was received from the control server - authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil + keyExpired bool // TODO(nickkhyl): move to nodeContext + authURL string // non-empty if not Running; TODO(nickkhyl): move to nodeContext + authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeContext + authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeContext egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -315,7 +317,7 @@ type LocalBackend struct { lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. - capForcedNetfilter string + capForcedNetfilter string // TODO(nickkhyl): move to nodeContext // offlineAutoUpdateCancel stops offline auto-updates when called. It // should be used via stopOfflineAutoUpdate and // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are @@ -327,7 +329,7 @@ type LocalBackend struct { // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig serveConfig ipn.ServeConfigView // or !Valid if none - ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names + ipVIPServiceMap netmap.IPServiceMappings // map of VIPService IPs to their corresponding service names; TODO(nickkhyl): move to nodeContext webClient webClient webClientListeners map[netip.AddrPort]*localListener // listeners for local web client traffic @@ -342,7 +344,7 @@ type LocalBackend struct { // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. - dialPlan atomic.Pointer[tailcfg.ControlDialPlan] + dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeContext? // tkaSyncLock is used to make tkaSyncIfNeeded an exclusive // section. This is needed to stop two map-responses in quick succession @@ -517,6 +519,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } + b.currentNodeAtomic.Store(newLocalNodeContext()) mConn.SetNetInfoCallback(b.setNetInfo) if sys.InitialConfig != nil { @@ -591,6 +594,16 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } +func (b *LocalBackend) currentNode() *localNodeContext { + if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { + return v + } + // Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor... + v := newLocalNodeContext() + b.currentNodeAtomic.CompareAndSwap(nil, v) + return b.currentNodeAtomic.Load() +} + // FindExtensionByName returns an active extension with the given name, // or nil if no such extension exists. 
func (b *LocalBackend) FindExtensionByName(name string) any { @@ -860,7 +873,7 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { return } networkUp := b.prevIfState.AnyInterfaceUp() - b.cc.SetPaused((b.state == ipn.Stopped && b.netMap != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) + b.cc.SetPaused((b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) } // DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe @@ -918,11 +931,13 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. - b.updateFilterLocked(b.netMap, b.pm.CurrentPrefs()) + b.updateFilterLocked(b.pm.CurrentPrefs()) updateExitNodeUsageWarning(b.pm.CurrentPrefs(), delta.New, b.health) - if peerAPIListenAsync && b.netMap != nil && b.state == ipn.Running { - want := b.netMap.GetAddresses().Len() + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() have := len(b.peerAPIListeners) b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) if have < want { @@ -1163,6 +1178,8 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { b.mu.Lock() defer b.mu.Unlock() + cn := b.currentNode() + nm := cn.NetMap() sb.MutateStatus(func(s *ipnstate.Status) { s.Version = version.Long() s.TUN = !b.sys.IsNetstack() @@ -1179,21 +1196,21 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { if m := b.sshOnButUnusableHealthCheckMessageLocked(); m != "" { s.Health = append(s.Health, m) } - if b.netMap != nil { - s.CertDomains = append([]string(nil), b.netMap.DNS.CertDomains...) - s.MagicDNSSuffix = b.netMap.MagicDNSSuffix() + if nm != nil { + s.CertDomains = append([]string(nil), nm.DNS.CertDomains...) 
+ s.MagicDNSSuffix = nm.MagicDNSSuffix() if s.CurrentTailnet == nil { s.CurrentTailnet = &ipnstate.TailnetStatus{} } - s.CurrentTailnet.MagicDNSSuffix = b.netMap.MagicDNSSuffix() - s.CurrentTailnet.MagicDNSEnabled = b.netMap.DNS.Proxied - s.CurrentTailnet.Name = b.netMap.Domain + s.CurrentTailnet.MagicDNSSuffix = nm.MagicDNSSuffix() + s.CurrentTailnet.MagicDNSEnabled = nm.DNS.Proxied + s.CurrentTailnet.Name = nm.Domain if prefs := b.pm.CurrentPrefs(); prefs.Valid() { - if !prefs.RouteAll() && b.netMap.AnyPeersAdvertiseRoutes() { + if !prefs.RouteAll() && nm.AnyPeersAdvertiseRoutes() { s.Health = append(s.Health, healthmsg.WarnAcceptRoutesOff) } if !prefs.ExitNodeID().IsZero() { - if exitPeer, ok := b.netMap.PeerWithStableID(prefs.ExitNodeID()); ok { + if exitPeer, ok := nm.PeerWithStableID(prefs.ExitNodeID()); ok { s.ExitNodeStatus = &ipnstate.ExitNodeStatus{ ID: prefs.ExitNodeID(), Online: exitPeer.Online().Get(), @@ -1206,8 +1223,8 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { }) var tailscaleIPs []netip.Addr - if b.netMap != nil { - addrs := b.netMap.GetAddresses() + if nm != nil { + addrs := nm.GetAddresses() for i := range addrs.Len() { if addr := addrs.At(i); addr.IsSingleIP() { sb.AddTailscaleIP(addr.Addr()) @@ -1219,14 +1236,14 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { sb.MutateSelfStatus(func(ss *ipnstate.PeerStatus) { ss.OS = version.OS() ss.Online = b.health.GetInPollNetMap() - if b.netMap != nil { + if nm != nil { ss.InNetworkMap = true - if hi := b.netMap.SelfNode.Hostinfo(); hi.Valid() { + if hi := nm.SelfNode.Hostinfo(); hi.Valid() { ss.HostName = hi.Hostname() } - ss.DNSName = b.netMap.Name - ss.UserID = b.netMap.User() - if sn := b.netMap.SelfNode; sn.Valid() { + ss.DNSName = nm.Name + ss.UserID = nm.User() + if sn := nm.SelfNode; sn.Valid() { peerStatusFromNode(ss, sn) if cm := sn.CapMap(); cm.Len() > 0 { ss.Capabilities = make([]tailcfg.NodeCapability, 1, cm.Len()+1) @@ -1259,14 +1276,16 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { - if b.netMap == nil { + cn := b.currentNode() + nm := cn.NetMap() + if nm == nil { return } - for id, up := range b.netMap.UserProfiles { + for id, up := range nm.UserProfiles { sb.AddUser(id, up) } exitNodeID := b.pm.CurrentPrefs().ExitNodeID() - for _, p := range b.peers { + for _, p := range cn.Peers() { tailscaleIPs := make([]netip.Addr, 0, p.Addresses().Len()) for i := range p.Addresses().Len() { addr := p.Addresses().At(i) @@ -1355,18 +1374,10 @@ func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { // WhoIsNodeKey returns the peer info of given public key, if it exists. func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { - b.mu.Lock() - defer b.mu.Unlock() - // TODO(bradfitz): add nodeByKey like nodeByAddr instead of walking peers. 
- if b.netMap == nil { - return n, u, false - } - if self := b.netMap.SelfNode; self.Valid() && self.Key() == k { - return self, profileFromView(b.netMap.UserProfiles[self.User()]), true - } - for _, n := range b.peers { - if n.Key() == k { - up, ok := b.netMap.UserProfiles[n.User()] + cn := b.currentNode() + if nid, ok := cn.NodeByKey(k); ok { + if n, ok := cn.PeerByID(nid); ok { + up, ok := cn.NetMap().UserProfiles[n.User()] u = profileFromView(up) return n, u, ok } @@ -1399,7 +1410,8 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi return zero, u, false } - nid, ok := b.nodeByAddr[ipp.Addr()] + cn := b.currentNode() + nid, ok := cn.NodeByAddr(ipp.Addr()) if !ok { var ip netip.Addr if ipp.Port() != 0 { @@ -1421,23 +1433,24 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi if !ok { return failf("no IP found in ProxyMapper for %v", ipp) } - nid, ok = b.nodeByAddr[ip] + nid, ok = cn.NodeByAddr(ip) if !ok { return failf("no node for proxymapped IP %v", ip) } } - if b.netMap == nil { + nm := cn.NetMap() + if nm == nil { return failf("no netmap") } - n, ok = b.peers[nid] + n, ok = cn.PeerByID(nid) if !ok { // Check if this the self-node, which would not appear in peers. - if !b.netMap.SelfNode.Valid() || nid != b.netMap.SelfNode.ID() { + if !nm.SelfNode.Valid() || nid != nm.SelfNode.ID() { return zero, u, false } - n = b.netMap.SelfNode + n = nm.SelfNode } - up, ok := b.netMap.UserProfiles[n.User()] + up, ok := cn.UserByID(n.User()) if !ok { return failf("no userprofile for node %v", n.Key()) } @@ -1447,12 +1460,33 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi // PeerCaps returns the capabilities that remote src IP has to // ths current node. func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { + return b.currentNode().PeerCaps(src) +} + +func (b *localNodeContext) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + b.mu.Lock() + defer b.mu.Unlock() + ret := base + if b.netMap == nil { + return ret + } + for _, peer := range b.netMap.Peers { + if pred(peer) { + ret = append(ret, peer) + } + } + return ret +} + +// PeerCaps returns the capabilities that remote src IP has to +// ths current node. 
+func (b *localNodeContext) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { b.mu.Lock() defer b.mu.Unlock() return b.peerCapsLocked(src) } -func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { +func (b *localNodeContext) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { if b.netMap == nil { return nil } @@ -1474,7 +1508,7 @@ func (b *LocalBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap { return nil } -func (b *LocalBackend) GetFilterForTest() *filter.Filter { +func (b *localNodeContext) GetFilterForTest() *filter.Filter { return b.filterAtomic.Load() } @@ -1578,8 +1612,9 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.mu.Lock() prefsChanged := false + cn := b.currentNode() prefs := b.pm.CurrentPrefs().AsStruct() - oldNetMap := b.netMap + oldNetMap := cn.NetMap() curNetMap := st.NetMap if curNetMap == nil { // The status didn't include a netmap update, so the old one is still @@ -1699,7 +1734,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.tkaFilterNetmapLocked(st.NetMap) } b.setNetMapLocked(st.NetMap) - b.updateFilterLocked(st.NetMap, prefs.View()) + b.updateFilterLocked(prefs.View()) } b.mu.Unlock() @@ -1965,19 +2000,30 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.mu.Lock() defer b.mu.Unlock() - if !b.updateNetmapDeltaLocked(muts) { - return false + cn := b.currentNode() + cn.UpdateNetmapDelta(muts) + + // If auto exit nodes are enabled and our exit node went offline, + // we need to schedule picking a new one. + // TODO(nickkhyl): move the auto exit node logic to a feature package. + if shouldAutoExitNode() { + exitNodeID := b.pm.prefs.ExitNodeID() + for _, m := range muts { + mo, ok := m.(netmap.NodeMutationOnline) + if !ok || mo.Online { + continue + } + n, ok := cn.PeerByID(m.NodeIDBeingMutated()) + if !ok || n.StableID() != exitNodeID { + continue + } + b.goTracker.Go(b.pickNewAutoExitNode) + break + } } - if b.netMap != nil && mutationsAreWorthyOfTellingIPNBus(muts) { - nm := ptr.To(*b.netMap) // shallow clone - nm.Peers = make([]tailcfg.NodeView, 0, len(b.peers)) - for _, p := range b.peers { - nm.Peers = append(nm.Peers, p) - } - slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { - return cmp.Compare(a.ID(), b.ID()) - }) + if cn.NetMap() != nil && mutationsAreWorthyOfTellingIPNBus(muts) { + nm := cn.netMapWithPeers() notify = &ipn.Notify{NetMap: nm} } else if testenv.InTest() { // In tests, send an empty Notify as a wake-up so end-to-end @@ -1988,6 +2034,20 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } +func (c *localNodeContext) netMapWithPeers() *netmap.NetworkMap { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return nil + } + nm := ptr.To(*c.netMap) // shallow clone + nm.Peers = slicesx.MapValues(c.peers) + slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + return nm +} + // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. 
@@ -2018,8 +2078,10 @@ func (b *LocalBackend) pickNewAutoExitNode() { b.send(ipn.Notify{Prefs: &newPrefs}) } -func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (handled bool) { - if b.netMap == nil || len(b.peers) == 0 { +func (c *localNodeContext) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil || len(c.peers) == 0 { return false } @@ -2031,7 +2093,7 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { - nv, ok := b.peers[m.NodeIDBeingMutated()] + nv, ok := c.peers[m.NodeIDBeingMutated()] if !ok { // TODO(bradfitz): unexpected metric? return false @@ -2040,15 +2102,9 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand mak.Set(&mutableNodes, nv.ID(), n) } m.Apply(n) - - // If our exit node went offline, we need to schedule picking - // a new one. - if mo, ok := m.(netmap.NodeMutationOnline); ok && !mo.Online && n.StableID == b.pm.prefs.ExitNodeID() && shouldAutoExitNode() { - b.goTracker.Go(b.pickNewAutoExitNode) - } } for nid, n := range mutableNodes { - b.peers[nid] = n.View() + c.peers[nid] = n.View() } return true } @@ -2195,15 +2251,6 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// NodeViewByIDForTest returns the state of the node with the given ID -// for integration tests in another repo. -func (b *LocalBackend) NodeViewByIDForTest(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { - b.mu.Lock() - defer b.mu.Unlock() - n, ok := b.peers[id] - return n, ok -} - // DisablePortMapperForTest disables the portmapper for tests. // It must be called before Start. func (b *LocalBackend) DisablePortMapperForTest() { @@ -2215,6 +2262,10 @@ func (b *LocalBackend) DisablePortMapperForTest() { // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + return b.currentNode().PeersForTest() +} + +func (b *localNodeContext) PeersForTest() []tailcfg.NodeView { b.mu.Lock() defer b.mu.Unlock() ret := slicesx.MapValues(b.peers) @@ -2308,15 +2359,13 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.hostinfo = hostinfo b.state = ipn.NoState + cn := b.currentNode() if opts.UpdatePrefs != nil { oldPrefs := b.pm.CurrentPrefs() newPrefs := opts.UpdatePrefs.Clone() newPrefs.Persist = oldPrefs.Persist().AsStruct() pv := newPrefs.View() - if err := b.pm.SetPrefs(pv, ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - }); err != nil { + if err := b.pm.SetPrefs(pv, cn.NetworkProfile()); err != nil { b.logf("failed to save UpdatePrefs state: %v", err) } } @@ -2327,7 +2376,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // This is important in two cases: when opts.UpdatePrefs is not nil, // and when Always Mode is enabled and we need to set WantRunning to true. if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - setExitNodeID(newp, b.netMap) + setExitNodeID(newp, cn.NetMap()) b.pm.setPrefsNoPermCheck(newp.View()) } prefs := b.pm.CurrentPrefs() @@ -2496,13 +2545,24 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ // given netMap and user preferences. // // b.mu must be held. 
-func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.PrefsView) { +func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { + // TODO(nickkhyl) split this into two functions: + // - (*localNodeContext).RebuildFilters() (normalFilter, jailedFilter *filter.Filter, changed bool), + // which would return packet filters for the current state and whether they changed since the last call. + // - (*LocalBackend).updateFilters(), which would use the above to update the engine with the new filters, + // notify b.sshServer, etc. + // + // For this, we would need to plumb a few more things into the [localNodeContext]. Most importantly, + // the current [ipn.PrefsView]), but also maybe also a b.logf and a b.health? + // // NOTE(danderson): keep change detection as the first thing in // this function. Don't try to optimize by returning early, more // likely than not you'll just end up breaking the change // detection and end up with the wrong filter installed. This is // quite hard to debug, so save yourself the trouble. var ( + cn = b.currentNode() + netMap = cn.NetMap() haveNetmap = netMap != nil addrs views.Slice[netip.Prefix] packetFilter []filter.Match @@ -2521,7 +2581,7 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P } packetFilter = netMap.PacketFilter - if packetFilterPermitsUnlockedNodes(b.peers, packetFilter) { + if cn.unlockedNodesPermitted(packetFilter) { b.health.SetUnhealthy(invalidPacketFilterWarnable, nil) packetFilter = nil } else { @@ -2702,11 +2762,9 @@ func (b *LocalBackend) performCaptiveDetection() { } d := captivedetection.NewDetector(b.logf) - var dm *tailcfg.DERPMap - b.mu.Lock() - if b.netMap != nil { - dm = b.netMap.DERPMap - } + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() preferredDERP := 0 if b.hostinfo != nil { if b.hostinfo.NetInfo != nil { @@ -2773,11 +2831,17 @@ func packetFilterPermitsUnlockedNodes(peers map[tailcfg.NodeID]tailcfg.NodeView, return false } +// TODO(nickkhyl): this should be non-existent with a proper [LocalBackend.updateFilterLocked]. +// See the comment in that function for more details. func (b *LocalBackend) setFilter(f *filter.Filter) { - b.filterAtomic.Store(f) + b.currentNode().setFilter(f) b.e.SetFilter(f) } +func (c *localNodeContext) setFilter(f *filter.Filter) { + c.filterAtomic.Store(f) +} + var removeFromDefaultRoute = []netip.Prefix{ // RFC1918 LAN ranges netip.MustParsePrefix("192.168.0.0/16"), @@ -3029,6 +3093,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares if mask&initialBits != 0 { + cn := b.currentNode() ini = &ipn.Notify{Version: version.Long()} if mask&ipn.NotifyInitialState != 0 { ini.SessionID = sessionID @@ -3041,9 +3106,9 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A ini.Prefs = ptr.To(b.sanitizedPrefsLocked()) } if mask&ipn.NotifyInitialNetMap != 0 { - ini.NetMap = b.netMap + ini.NetMap = cn.NetMap() } - if mask&ipn.NotifyInitialDriveShares != 0 && b.driveSharingEnabledLocked() { + if mask&ipn.NotifyInitialDriveShares != 0 && b.DriveSharingEnabled() { ini.DriveShares = b.pm.prefs.DriveShares() } if mask&ipn.NotifyInitialHealthState != 0 { @@ -3137,11 +3202,7 @@ func (b *LocalBackend) DebugNotify(n ipn.Notify) { // // It should only be used via the LocalAPI's debug handler. 
func (b *LocalBackend) DebugNotifyLastNetMap() { - b.mu.Lock() - nm := b.netMap - b.mu.Unlock() - - if nm != nil { + if nm := b.currentNode().NetMap(); nm != nil { b.send(ipn.Notify{NetMap: nm}) } } @@ -3155,7 +3216,8 @@ func (b *LocalBackend) DebugNotifyLastNetMap() { func (b *LocalBackend) DebugForceNetmapUpdate() { b.mu.Lock() defer b.mu.Unlock() - nm := b.netMap + // TODO(nickkhyl): this all should be done in [LocalBackend.setNetMapLocked]. + nm := b.currentNode().NetMap() b.e.SetNetworkMap(nm) if nm != nil { b.MagicConn().SetDERPMap(nm.DERPMap) @@ -3583,7 +3645,7 @@ func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tail b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) return } - nm := b.netMap + nm := b.currentNode().NetMap() if nm == nil { b.logf("can't set intercept function for Service TCP Ports, netMap is nil") return @@ -3839,15 +3901,17 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt // in Hostinfo. When the user preferences currently request "shields up" // mode, all inbound connections are refused, so services are not reported. // Otherwise, shouldUploadServices respects NetMap.CollectServices. +// TODO(nickkhyl): move this into [localNodeContext]? func (b *LocalBackend) shouldUploadServices() bool { b.mu.Lock() defer b.mu.Unlock() p := b.pm.CurrentPrefs() - if !p.Valid() || b.netMap == nil { + nm := b.currentNode().NetMap() + if !p.Valid() || nm == nil { return false // default to safest setting } - return !p.ShieldsUp() && b.netMap.CollectServices + return !p.ShieldsUp() && nm.CollectServices } // SetCurrentUser is used to implement support for multi-user systems (only @@ -4068,13 +4132,12 @@ func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { if envknob.SSHIgnoreTailnetPolicy() || envknob.SSHPolicyFile() != "" { return nil } - if b.netMap != nil { - if !b.netMap.HasCap(tailcfg.CapabilitySSH) { - if b.isDefaultServerLocked() { - return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet. See https://tailscale.com/s/ssh") - } - return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet.") + // Assume that we do have the SSH capability if don't have a netmap yet. + if !b.currentNode().SelfHasCapOr(tailcfg.CapabilitySSH, true) { + if b.isDefaultServerLocked() { + return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet. 
See https://tailscale.com/s/ssh") } + return errors.New("Unable to enable local Tailscale SSH server; not enabled on Tailnet.") } return nil } @@ -4086,7 +4149,7 @@ func (b *LocalBackend) sshOnButUnusableHealthCheckMessageLocked() (healthMessage if envknob.SSHIgnoreTailnetPolicy() || envknob.SSHPolicyFile() != "" { return "development SSH policy in use" } - nm := b.netMap + nm := b.currentNode().NetMap() if nm == nil { return "" } @@ -4413,7 +4476,8 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { defer unlock() - netMap := b.netMap + cn := b.currentNode() + netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) oldp := b.pm.CurrentPrefs() @@ -4438,7 +4502,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) hostInfoChanged := !oldHi.Equal(newHi) cc := b.cc - b.updateFilterLocked(netMap, newp.View()) + b.updateFilterLocked(newp.View()) if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { @@ -4462,13 +4526,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } prefs := newp.View() - np := b.pm.CurrentProfile().NetworkProfile() - if netMap != nil { - np = ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - } - } + np := cmp.Or(cn.NetworkProfile(), b.pm.CurrentProfile().NetworkProfile()) if err := b.pm.SetPrefs(prefs, np); err != nil { b.logf("failed to save new controlclient state: %v", err) } else if prefs.WantRunning() { @@ -4712,9 +4770,13 @@ func extractPeerAPIPorts(services []tailcfg.Service) portPair { // NetMap returns the latest cached network map received from // controlclient, or nil if no network map was received yet. 
func (b *LocalBackend) NetMap() *netmap.NetworkMap { - b.mu.Lock() - defer b.mu.Unlock() - return b.netMap + return b.currentNode().NetMap() +} + +func (c *localNodeContext) NetMap() *netmap.NetworkMap { + c.mu.Lock() + defer c.mu.Unlock() + return c.netMap } func (b *LocalBackend) isEngineBlocked() bool { @@ -4843,12 +4905,13 @@ func (b *LocalBackend) authReconfig() { b.mu.Lock() blocked := b.blocked prefs := b.pm.CurrentPrefs() - nm := b.netMap + cn := b.currentNode() + nm := cn.NetMap() hasPAC := b.prevIfState.HasPAC() - disableSubnetsIfPAC := nm.HasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - userDialUseRoutes := nm.HasCap(tailcfg.NodeAttrUserDialUseRoutes) - dohURL, dohURLOK := exitNodeCanProxyDNS(nm, b.peers, prefs.ExitNodeID()) - dcfg := dnsConfigForNetmap(nm, b.peers, prefs, b.keyExpired, b.logf, version.OS()) + disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) + userDialUseRoutes := cn.SelfHasCap(tailcfg.NodeAttrUserDialUseRoutes) + dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) closing := b.shutdownCalled @@ -4955,6 +5018,12 @@ func shouldUseOneCGNATRoute(logf logger.Logf, mon *netmon.Monitor, controlKnobs return false } +func (c *localNodeContext) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { + c.mu.Lock() + defer c.mu.Unlock() + return dnsConfigForNetmap(c.netMap, c.peers, prefs, selfExpired, logf, versionOS) +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // @@ -5245,7 +5314,9 @@ func (b *LocalBackend) initPeerAPIListener() { return } - if b.netMap == nil { + cn := b.currentNode() + nm := cn.NetMap() + if nm == nil { // We're called from authReconfig which checks that // netMap is non-nil, but if a concurrent Logout, // ResetForClientDisconnect, or Start happens when its @@ -5255,7 +5326,7 @@ func (b *LocalBackend) initPeerAPIListener() { return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() if addrs.Len() == len(b.peerAPIListeners) { allSame := true for i, pln := range b.peerAPIListeners { @@ -5273,8 +5344,8 @@ func (b *LocalBackend) initPeerAPIListener() { b.closePeerAPIListenersLocked() - selfNode := b.netMap.SelfNode - if !selfNode.Valid() || b.netMap.GetAddresses().Len() == 0 { + selfNode := nm.SelfNode + if !selfNode.Valid() || nm.GetAddresses().Len() == 0 { b.logf("[v1] initPeerAPIListener: no addresses in netmap") return } @@ -5568,6 +5639,7 @@ func (b *LocalBackend) enterState(newState ipn.State) { // enterStateLockedOnEntry is like enterState but requires b.mu be held to call // it, but it unlocks b.mu when done (via unlock, a once func). 
func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { + cn := b.currentNode() oldState := b.state b.state = newState prefs := b.pm.CurrentPrefs() @@ -5580,7 +5652,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock panic("[unexpected] use of main control server in integration test") } - netMap := b.netMap + netMap := cn.NetMap() activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { @@ -5685,7 +5757,8 @@ func (b *LocalBackend) NodeKey() key.NodePublic { func (b *LocalBackend) nextStateLocked() ipn.State { var ( cc = b.cc - netMap = b.netMap + cn = b.currentNode() + netMap = cn.NetMap() state = b.state blocked = b.blocked st = b.engineStatus @@ -6071,6 +6144,14 @@ func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPre return newPrefs } +func (c *localNodeContext) SetNetMap(nm *netmap.NetworkMap) { + c.mu.Lock() + defer c.mu.Unlock() + c.netMap = nm + c.updateNodeByAddrLocked() + c.updatePeersLocked() +} + // setNetMapLocked updates the LocalBackend state to reflect the newly // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. @@ -6083,8 +6164,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { if nm != nil { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") } - b.netMap = nm - b.updatePeersFromNetmapLocked(nm) + b.currentNode().SetNetMap(nm) if login != b.activeLogin { b.logf("active login: %v", login) b.activeLogin = login @@ -6124,14 +6204,33 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) b.ipVIPServiceMap = nm.GetIPVIPServiceMap() if nm == nil { - b.nodeByAddr = nil - // If there is no netmap, the client is going into a "turned off" // state so reset the metrics. b.metrics.approvedRoutes.Set(0) return } + if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } + } + b.metrics.approvedRoutes.Set(approved) + } + + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() +} + +func (b *localNodeContext) updateNodeByAddrLocked() { + nm := b.netMap + if nm == nil { + b.nodeByAddr = nil + return + } + // Update the nodeByAddr index. 
if b.nodeByAddr == nil { b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{} @@ -6149,14 +6248,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } if nm.SelfNode.Valid() { addNode(nm.SelfNode) - - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ - } - } - b.metrics.approvedRoutes.Set(approved) } for _, p := range nm.Peers { addNode(p) @@ -6167,12 +6258,10 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { delete(b.nodeByAddr, k) } } - - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() } -func (b *LocalBackend) updatePeersFromNetmapLocked(nm *netmap.NetworkMap) { +func (b *localNodeContext) updatePeersLocked() { + nm := b.netMap if nm == nil { b.peers = nil return @@ -6291,7 +6380,7 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err } dt.b.mu.Lock() - selfNodeKey := dt.b.netMap.SelfNode.Key().ShortString() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() dt.b.mu.Unlock() n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) shareNodeKey := "unknown" @@ -6366,7 +6455,7 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { // the method to only run the reset-logic and not reload the store from memory to ensure // foreground sessions are not removed if they are not saved on disk. func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { // We're not logged in, so we don't have a profile. // Don't try to load the serve config. b.lastServeConfJSON = mem.B(nil) @@ -6576,7 +6665,15 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK return mk, nk } -func (b *LocalBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { +// PeerHasCap reports whether the peer with the given Tailscale IP addresses +// contains the given capability string, with any value(s). +func (b *localNodeContext) PeerHasCap(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { + b.mu.Lock() + defer b.mu.Unlock() + return b.peerHasCapLocked(addr, wantCap) +} + +func (b *localNodeContext) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool { return b.peerCapsLocked(addr).HasCapability(wantCap) } @@ -6640,6 +6737,19 @@ func peerAPIURL(ip netip.Addr, port uint16) string { return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port)) } +func (c *localNodeContext) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return c.PeerAPIBase(p) != "" +} + +// PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI, +// or the empty string if the peer is invalid or doesn't support PeerAPI. +func (c *localNodeContext) PeerAPIBase(p tailcfg.NodeView) string { + c.mu.Lock() + nm := c.netMap + c.mu.Unlock() + return peerAPIBase(nm, p) +} + // peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI. // It returns the empty string if the peer doesn't support the peerapi // or there's no matching address family based on the netmap's own addresses. @@ -6766,12 +6876,7 @@ func (b *LocalBackend) SetUDPGROForwarding() error { // DERPMap returns the current DERPMap in use, or nil if not connected. 
func (b *LocalBackend) DERPMap() *tailcfg.DERPMap { - b.mu.Lock() - defer b.mu.Unlock() - if b.netMap == nil { - return nil - } - return b.netMap.DERPMap + return b.currentNode().DERPMap() } // OfferingExitNode reports whether b is currently offering exit node @@ -6811,7 +6916,7 @@ func (b *LocalBackend) OfferingAppConnector() bool { func (b *LocalBackend) allowExitNodeDNSProxyToServeName(name string) bool { b.mu.Lock() defer b.mu.Unlock() - nm := b.netMap + nm := b.NetMap() if nm == nil { return false } @@ -6882,6 +6987,12 @@ func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg return "", false } +func (c *localNodeContext) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + return exitNodeCanProxyDNS(c.netMap, c.peers, exitNodeID) +} + // wireguardExitNodeDNSResolvers returns the DNS resolvers to use for a // WireGuard-only exit node, if it has resolver addresses. func wireguardExitNodeDNSResolvers(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) ([]*dnstype.Resolver, bool) { @@ -6957,7 +7068,7 @@ func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Respons b.mu.Lock() cc := b.ccAuto - if nm := b.netMap; nm != nil { + if nm := b.NetMap(); nm != nil { priv = nm.PrivateKey } b.mu.Unlock() @@ -7089,11 +7200,12 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re defer b.mu.Unlock() io.WriteString(w, "
<html><title>Tailscale</title><body><h1>Tailscale</h1>
\n") - if b.netMap == nil { + nm := b.currentNode().NetMap() + if nm == nil { io.WriteString(w, "No netmap.\n") return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() if addrs.Len() == 0 { io.WriteString(w, "No local addresses.\n") return @@ -7124,7 +7236,7 @@ func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { // controlplane. checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { b.mu.Lock() - nm := b.netMap + nm := b.NetMap() b.mu.Unlock() if nm == nil { return nil @@ -7299,8 +7411,9 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. return nil } + b.currentNodeAtomic.Store(newLocalNodeContext()) b.setNetMapLocked(nil) // Reset netmap. - b.updateFilterLocked(nil, ipn.PrefsView{}) + b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { @@ -7663,7 +7776,7 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (response apitype.ExitNodeSuggestionResponse, err error) { // netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). If netMap is nil, then b.netMap is used. if netMap == nil { - netMap = b.netMap + netMap = b.NetMap() } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode @@ -7988,21 +8101,19 @@ func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { // rules that require a source IP to have a certain node capability. // // TODO(bradfitz): optimize this later if/when it matters. +// TODO(nickkhyl): move this into [localNodeContext] along with [LocalBackend.updateFilterLocked]. func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCapability) bool { if cap == "" { // Shouldn't happen, but just in case. // But the empty cap also shouldn't be found in Node.CapMap. return false } - - b.mu.Lock() - defer b.mu.Unlock() - - nodeID, ok := b.nodeByAddr[srcIP] + cn := b.currentNode() + nodeID, ok := cn.NodeByAddr(srcIP) if !ok { return false } - n, ok := b.peers[nodeID] + n, ok := cn.PeerByID(nodeID) if !ok { return false } diff --git a/ipn/ipnlocal/local_node_context.go b/ipn/ipnlocal/local_node_context.go new file mode 100644 index 000000000..871880893 --- /dev/null +++ b/ipn/ipnlocal/local_node_context.go @@ -0,0 +1,207 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "net/netip" + "sync" + "sync/atomic" + + "go4.org/netipx" + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/util/slicesx" + "tailscale.com/wgengine/filter" +) + +// localNodeContext holds the [LocalBackend]'s context tied to a local node (usually the current one). +// +// Its exported methods are safe for concurrent use, but the struct is not a snapshot of state at a given moment; +// its state can change between calls. For example, asking for the same value (e.g., netmap or prefs) twice +// may return different results. Returned values are immutable and safe for concurrent use. 
+// +// If both the [LocalBackend]'s internal mutex and the [localNodeContext] mutex must be held at the same time, +// the [LocalBackend] mutex must be acquired first. See the comment on the [LocalBackend] field for more details. +// +// Two pointers to different [localNodeContext] instances represent different local nodes. +// However, there's currently a bug where a new [localNodeContext] might not be created +// during an implicit node switch (see tailscale/corp#28014). + +// In the future, we might want to include at least the following in this struct (in addition to the current fields). +// However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions, +// peer API handlers, etc.). +// - [ipn.State]: when the LocalBackend switches to a different [localNodeContext], it can update the state of the old one. +// - [ipn.LoginProfileView] and [ipn.Prefs]: we should update them when the [profileManager] reports changes to them. +// In the future, [profileManager] (and the corresponding methods of the [LocalBackend]) can be made optional, +// and something else could be used to set them once or update them as needed. +// - [tailcfg.HostinfoView]: it includes certain fields that are tied to the current profile/node/prefs. We should also +// update to build it once instead of mutating it in twelvety different places. +// - [filter.Filter] (normal and jailed, along with the filterHash): the localNodeContext could have a method to (re-)build +// the filter for the current netmap/prefs (see [LocalBackend.updateFilterLocked]), and it needs to track the current +// filters and their hash. +// - Fields related to a requested or required (re-)auth: authURL, authURLTime, authActor, keyExpired, etc. +// - [controlclient.Client]/[*controlclient.Auto]: the current control client. It is ties to a node identity. +// - [tkaState]: it is tied to the current profile / node. +// - Fields related to scheduled node expiration: nmExpiryTimer, numClientStatusCalls, [expiryManager]. +// +// It should not include any fields used by specific features that don't belong in [LocalBackend]. +// Even if they're tied to the local node, instead of moving them here, we should extract the entire feature +// into a separate package and have it install proper hooks. +type localNodeContext struct { + // filterAtomic is a stateful packet filter. Immutable once created, but can be + // replaced with a new one. + filterAtomic atomic.Pointer[filter.Filter] + + // TODO(nickkhyl): maybe use sync.RWMutex? + mu sync.Mutex // protects the following fields + + // NetMap is the most recently set full netmap from the controlclient. + // It can't be mutated in place once set. Because it can't be mutated in place, + // delta updates from the control server don't apply to it. Instead, use + // the peers map to get up-to-date information on the state of peers. + // In general, avoid using the netMap.Peers slice. We'd like it to go away + // as of 2023-09-17. + // TODO(nickkhyl): make it an atomic pointer to avoid the need for a mutex? + netMap *netmap.NetworkMap + + // peers is the set of current peers and their current values after applying + // delta node mutations as they come in (with mu held). The map values can be + // given out to callers, but the map itself can be mutated in place (with mu held) + // and must not escape the [localNodeContext]. + peers map[tailcfg.NodeID]tailcfg.NodeView + + // nodeByAddr maps nodes' own addresses (excluding subnet routes) to node IDs. 
+ // It is mutated in place (with mu held) and must not escape the [localNodeContext]. + nodeByAddr map[netip.Addr]tailcfg.NodeID +} + +func newLocalNodeContext() *localNodeContext { + cn := &localNodeContext{} + // Default filter blocks everything and logs nothing. + noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) + cn.filterAtomic.Store(noneFilter) + return cn +} + +func (c *localNodeContext) Self() tailcfg.NodeView { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return tailcfg.NodeView{} + } + return c.netMap.SelfNode +} + +func (c *localNodeContext) SelfUserID() tailcfg.UserID { + self := c.Self() + if !self.Valid() { + return 0 + } + return self.User() +} + +// SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap. +func (c *localNodeContext) SelfHasCap(wantCap tailcfg.NodeCapability) bool { + return c.SelfHasCapOr(wantCap, false) +} + +// SelfHasCapOr is like [localNodeContext.SelfHasCap], but returns the specified default value +// if the netmap is not available yet. +func (c *localNodeContext) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return def + } + return c.netMap.AllCaps.Contains(wantCap) +} + +func (c *localNodeContext) NetworkProfile() ipn.NetworkProfile { + c.mu.Lock() + defer c.mu.Unlock() + return ipn.NetworkProfile{ + // These are ok to call with nil netMap. + MagicDNSName: c.netMap.MagicDNSSuffix(), + DomainName: c.netMap.DomainName(), + } +} + +// TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]? +func (c *localNodeContext) DERPMap() *tailcfg.DERPMap { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return nil + } + return c.netMap.DERPMap +} + +func (c *localNodeContext) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + nid, ok := c.nodeByAddr[ip] + return nid, ok +} + +func (c *localNodeContext) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + if c.netMap == nil { + return 0, false + } + if self := c.netMap.SelfNode; self.Valid() && self.Key() == k { + return self.ID(), true + } + // TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers. + for _, n := range c.peers { + if n.Key() == k { + return n.ID(), true + } + } + return 0, false +} + +func (c *localNodeContext) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + n, ok := c.peers[id] + return n, ok +} + +func (c *localNodeContext) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) { + c.mu.Lock() + nm := c.netMap + c.mu.Unlock() + if nm == nil { + return tailcfg.UserProfileView{}, false + } + u, ok := nm.UserProfiles[id] + return u, ok +} + +// Peers returns all the current peers in an undefined order. +func (c *localNodeContext) Peers() []tailcfg.NodeView { + c.mu.Lock() + defer c.mu.Unlock() + return slicesx.MapValues(c.peers) +} + +// unlockedNodesPermitted reports whether any peer with theUnsignedPeerAPIOnly bool set true has any of its allowed IPs +// in the specified packet filter. +// +// TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here, +// but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter. +// Something like (*localNodeContext).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps? 
+func (c *localNodeContext) unlockedNodesPermitted(packetFilter []filter.Match) bool { + c.mu.Lock() + defer c.mu.Unlock() + return packetFilterPermitsUnlockedNodes(c.peers, packetFilter) +} + +func (c *localNodeContext) filter() *filter.Filter { + return c.filterAtomic.Load() +} diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3b9e08638..94b5d9522 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -920,15 +920,15 @@ func TestWatchNotificationsCallbacks(t *testing.T) { // tests LocalBackend.updateNetmapDeltaLocked func TestUpdateNetmapDelta(t *testing.T) { b := newTestLocalBackend(t) - if b.updateNetmapDeltaLocked(nil) { + if b.currentNode().UpdateNetmapDelta(nil) { t.Errorf("updateNetmapDeltaLocked() = true, want false with nil netmap") } - b.netMap = &netmap.NetworkMap{} + nm := &netmap.NetworkMap{} for i := range 5 { - b.netMap.Peers = append(b.netMap.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View()) + nm.Peers = append(nm.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View()) } - b.updatePeersFromNetmapLocked(b.netMap) + b.currentNode().SetNetMap(nm) someTime := time.Unix(123, 0) muts, ok := netmap.MutationsFromMapResponse(&tailcfg.MapResponse{ @@ -955,7 +955,7 @@ func TestUpdateNetmapDelta(t *testing.T) { t.Fatal("netmap.MutationsFromMapResponse failed") } - if !b.updateNetmapDeltaLocked(muts) { + if !b.currentNode().UpdateNetmapDelta(muts) { t.Fatalf("updateNetmapDeltaLocked() = false, want true with new netmap") } @@ -978,9 +978,9 @@ func TestUpdateNetmapDelta(t *testing.T) { }, } for _, want := range wants { - gotv, ok := b.peers[want.ID] + gotv, ok := b.currentNode().PeerByID(want.ID) if !ok { - t.Errorf("netmap.Peer %v missing from b.peers", want.ID) + t.Errorf("netmap.Peer %v missing from b.profile.Peers", want.ID) continue } got := gotv.AsStruct() @@ -1398,7 +1398,7 @@ func TestCoveredRouteRangeNoDefault(t *testing.T) { func TestReconfigureAppConnector(t *testing.T) { b := newTestBackend(t) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector != nil { t.Fatal("unexpected app connector") } @@ -1411,7 +1411,7 @@ func TestReconfigureAppConnector(t *testing.T) { }, AppConnectorSet: true, }) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector == nil { t.Fatal("expected app connector") } @@ -1422,15 +1422,19 @@ func TestReconfigureAppConnector(t *testing.T) { "connectors": ["tag:example"] }` - b.netMap.SelfNode = (&tailcfg.Node{ - Name: "example.ts.net", - Tags: []string{"tag:example"}, - CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ - "tailscale.com/app-connectors": {tailcfg.RawMessage(appCfg)}, - }), - }).View() + nm := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + Tags: []string{"tag:example"}, + CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + "tailscale.com/app-connectors": {tailcfg.RawMessage(appCfg)}, + }), + }).View(), + } - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.currentNode().SetNetMap(nm) + + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) b.appConnector.Wait(context.Background()) want := []string{"example.com"} @@ -1450,7 +1454,7 @@ func TestReconfigureAppConnector(t *testing.T) { }, AppConnectorSet: true, }) - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) if b.appConnector != nil { 
t.Fatal("expected no app connector") } @@ -1482,7 +1486,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { }); err != nil { t.Fatal(err) } - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) // Smoke check that AdvertiseRoutes doesn't have the test IP. ip := netip.MustParseAddr("1.2.3.4") @@ -1503,7 +1507,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Mimic b.authReconfigure for the app connector bits. b.mu.Lock() - b.reconfigAppConnectorLocked(b.netMap, b.pm.prefs) + b.reconfigAppConnectorLocked(b.NetMap(), b.pm.prefs) b.mu.Unlock() b.readvertiseAppConnectorRoutes() @@ -1819,7 +1823,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) pm.prefs = test.prefs.View() - b.netMap = test.nm + b.currentNode().SetNetMap(test.nm) b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode @@ -1946,8 +1950,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := newTestLocalBackend(t) - b.netMap = tt.netmap - b.updatePeersFromNetmapLocked(b.netMap) + b.currentNode().SetNetMap(tt.netmap) b.lastSuggestedExitNode = tt.lastSuggestedExitNode b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) @@ -2065,14 +2068,14 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { }, }, } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: selfNode.View(), Peers: []tailcfg.NodeView{ peer1, peer2, }, DERPMap: defaultDERPMap, - } + }) b.lastSuggestedExitNode = peer1.StableID() b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) if eid := b.Prefs().ExitNodeID(); eid != peer1.StableID() { @@ -2137,7 +2140,7 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { syspolicy.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - b.netMap = nm + b.currentNode().SetNetMap(nm) b.lastSuggestedExitNode = peer1.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) @@ -3068,9 +3071,11 @@ func TestDriveManageShares(t *testing.T) { b.driveSetSharesLocked(tt.existing) } if !tt.disabled { - self := b.netMap.SelfNode.AsStruct() + nm := ptr.To(*b.currentNode().NetMap()) + self := nm.SelfNode.AsStruct() self.CapMap = tailcfg.NodeCapMap{tailcfg.NodeAttrsTaildriveShare: nil} - b.netMap.SelfNode = self.View() + nm.SelfNode = self.View() + b.currentNode().SetNetMap(nm) b.sys.Set(driveimpl.NewFileSystemForRemote(b.logf)) } b.mu.Unlock() @@ -5323,7 +5328,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }}, }) - f := lb.GetFilterForTest() + f := lb.currentNode().GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) if res != filter.Accept { t.Errorf("Check(2.2.2.2, ...) 
= %s, want %s", res, filter.Accept) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index e1583dab7..36d39a465 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -516,9 +516,10 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { var selfAuthorized bool nodeKeySignature := &tka.NodeKeySignature{} - if b.netMap != nil { - selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key(), b.netMap.SelfNode.KeySignature().AsSlice()) == nil - if err := nodeKeySignature.Unserialize(b.netMap.SelfNode.KeySignature().AsSlice()); err != nil { + nm := b.currentNode().NetMap() + if nm != nil { + selfAuthorized = b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key(), nm.SelfNode.KeySignature().AsSlice()) == nil + if err := nodeKeySignature.Unserialize(nm.SelfNode.KeySignature().AsSlice()); err != nil { b.logf("failed to decode self node key signature: %v", err) } } @@ -539,9 +540,9 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { } var visible []*ipnstate.TKAPeer - if b.netMap != nil { - visible = make([]*ipnstate.TKAPeer, len(b.netMap.Peers)) - for i, p := range b.netMap.Peers { + if nm != nil { + visible = make([]*ipnstate.TKAPeer, len(nm.Peers)) + for i, p := range nm.Peers { s := tkaStateFromPeer(p) visible[i] = &s } @@ -702,12 +703,10 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { id1, id2 := b.tka.authority.StateIDs() stateID := fmt.Sprintf("%d:%d", id1, id2) + cn := b.currentNode() newPrefs := b.pm.CurrentPrefs().AsStruct().Clone() // .Persist should always be initialized here. newPrefs.Persist.DisallowedTKAStateIDs = append(newPrefs.Persist.DisallowedTKAStateIDs, stateID) - if err := b.pm.SetPrefs(newPrefs.View(), ipn.NetworkProfile{ - MagicDNSName: b.netMap.MagicDNSSuffix(), - DomainName: b.netMap.DomainName(), - }); err != nil { + if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { return fmt.Errorf("saving prefs: %w", err) } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 87437daf8..2b4c07749 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -770,7 +770,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // but an app connector explicitly adds 0.0.0.0/32 (and the // IPv6 equivalent) to make this work (see updateFilterLocked // in LocalBackend). 
- f := b.filterAtomic.Load() + f := b.currentNode().filter() if f == nil { return false } diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 77c442060..975ed38bb 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -162,9 +162,9 @@ func TestHandlePeerAPI(t *testing.T) { lb := &LocalBackend{ logf: e.logBuf.Logf, capFileSharing: tt.capSharing, - netMap: &netmap.NetworkMap{SelfNode: selfNode.View()}, clock: &tstest.Clock{}, } + lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ isSelf: tt.isSelf, selfNode: selfNode.View(), diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index cc0d219d8..44d63fe54 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -232,7 +232,7 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 } } - nm := b.netMap + nm := b.NetMap() if nm == nil { b.logf("netMap is nil") return @@ -282,7 +282,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string } } - nm := b.netMap + nm := b.NetMap() if nm == nil { return errors.New("netMap is nil") } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 0279ea9be..b9370f877 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -320,7 +320,7 @@ func TestServeConfigServices(t *testing.T) { t.Fatal(err) } - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", CapMap: tailcfg.NodeCapMap{ @@ -334,7 +334,7 @@ func TestServeConfigServices(t *testing.T) { ProfilePicURL: "https://example.com/photo.jpg", }).View(), }, - } + }) tests := []struct { name string @@ -902,7 +902,7 @@ func newTestBackend(t *testing.T) *LocalBackend { pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm - b.netMap = &netmap.NetworkMap{ + b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", }).View(), @@ -913,24 +913,26 @@ func newTestBackend(t *testing.T) *LocalBackend { ProfilePicURL: "https://example.com/photo.jpg", }).View(), }, - } - b.peers = map[tailcfg.NodeID]tailcfg.NodeView{ - 152: (&tailcfg.Node{ - ID: 152, - ComputedName: "some-peer", - User: tailcfg.UserID(1), - }).View(), - 153: (&tailcfg.Node{ - ID: 153, - ComputedName: "some-tagged-peer", - Tags: []string{"tag:server", "tag:test"}, - User: tailcfg.UserID(1), - }).View(), - } - b.nodeByAddr = map[netip.Addr]tailcfg.NodeID{ - netip.MustParseAddr("100.150.151.152"): 152, - netip.MustParseAddr("100.150.151.153"): 153, - } + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 152, + ComputedName: "some-peer", + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.152/32"), + }, + }).View(), + (&tailcfg.Node{ + ID: 153, + ComputedName: "some-tagged-peer", + Tags: []string{"tag:server", "tag:test"}, + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.153/32"), + }, + }).View(), + }, + }) return b } diff --git a/ipn/ipnlocal/taildrop.go b/ipn/ipnlocal/taildrop.go index 807304f30..17ca40926 100644 --- a/ipn/ipnlocal/taildrop.go +++ b/ipn/ipnlocal/taildrop.go @@ -179,23 +179,32 @@ func (b *LocalBackend) HasCapFileSharing() bool { func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { var ret []*apitype.FileTarget - b.mu.Lock() + b.mu.Lock() // for b.{state,capFileSharing} defer b.mu.Unlock() - nm := b.netMap + cn := b.currentNode() + nm := cn.NetMap() + self := 
cn.SelfUserID()
 	if b.state != ipn.Running || nm == nil {
 		return nil, errors.New("not connected to the tailnet")
 	}
 	if !b.capFileSharing {
 		return nil, errors.New("file sharing not enabled by Tailscale admin")
 	}
-	for _, p := range b.peers {
-		if !b.peerIsTaildropTargetLocked(p) {
-			continue
+	peers := cn.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool {
+		if !p.Valid() || p.Hostinfo().OS() == "tvOS" {
+			return false
 		}
-		if p.Hostinfo().OS() == "tvOS" {
-			continue
+		if self == p.User() {
+			return true // same-user peers are always eligible targets
 		}
-		peerAPI := peerAPIBase(b.netMap, p)
+		if p.Addresses().Len() != 0 && cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
+			// Explicitly noted in the netmap ACL caps as a target.
+			return true
+		}
+		return false
+	})
+	for _, p := range peers {
+		peerAPI := cn.PeerAPIBase(p)
 		if peerAPI == "" {
 			continue
 		}
@@ -214,7 +223,9 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro
 	if b.state != ipn.Running {
 		return ipnstate.TaildropTargetIpnStateNotRunning
 	}
-	if b.netMap == nil {
+	cn := b.currentNode()
+	nm := cn.NetMap()
+	if nm == nil {
 		return ipnstate.TaildropTargetNoNetmapAvailable
 	}
 	if !b.capFileSharing {
@@ -228,10 +239,10 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro
 	if !p.Valid() {
 		return ipnstate.TaildropTargetNoPeerInfo
 	}
-	if b.netMap.User() != p.User() {
+	if nm.User() != p.User() {
 		// Different user must have the explicit file sharing target capability
-		if p.Addresses().Len() == 0 ||
-			!b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
+		if p.Addresses().Len() == 0 || !cn.PeerHasCap(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
+			// Not explicitly noted in the netmap ACL caps as a target.
 			return ipnstate.TaildropTargetOwnedByOtherUser
 		}
 	}
@@ -239,32 +250,12 @@ func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.Taildro
 	if p.Hostinfo().OS() == "tvOS" {
 		return ipnstate.TaildropTargetUnsupportedOS
 	}
-	if peerAPIBase(b.netMap, p) == "" {
+	if !cn.PeerHasPeerAPI(p) {
 		return ipnstate.TaildropTargetNoPeerAPI
 	}
 	return ipnstate.TaildropTargetAvailable
 }
 
-// peerIsTaildropTargetLocked reports whether p is a valid Taildrop file
-// recipient from this node according to its ownership and the capabilities in
-// the netmap.
-//
-// b.mu must be locked.
-func (b *LocalBackend) peerIsTaildropTargetLocked(p tailcfg.NodeView) bool {
-	if b.netMap == nil || !p.Valid() {
-		return false
-	}
-	if b.netMap.User() == p.User() {
-		return true
-	}
-	if p.Addresses().Len() > 0 &&
-		b.peerHasCapLocked(p.Addresses().At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget) {
-		// Explicitly noted in the netmap ACL caps as a target.
-		return true
-	}
-	return false
-}
-
 // UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and
 // sends an ipn.Notify with the full list of outgoingFiles.
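Since FileTargets and taildropTargetStatus now encode the same eligibility rule in two places, the rule is easier to see as a standalone predicate. This is a sketch only: isTaildropTarget is hypothetical, and it assumes the localNodeContext accessors introduced by this patch.

func isTaildropTarget(cn *localNodeContext, self tailcfg.UserID, p tailcfg.NodeView) bool {
	if !p.Valid() || p.Hostinfo().OS() == "tvOS" {
		return false // no peer info, or an OS that cannot receive Taildrop files
	}
	if p.User() == self {
		return true // devices owned by the same user can always share
	}
	// Peers owned by other users need an explicit grant in the netmap ACL caps.
	addrs := p.Addresses()
	return addrs.Len() > 0 &&
		cn.PeerHasCap(addrs.At(0).Addr(), tailcfg.PeerCapabilityFileSharingTarget)
}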
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) { diff --git a/ipn/ipnlocal/taildrop_test.go b/ipn/ipnlocal/taildrop_test.go index 9871d5e33..a5166e8a3 100644 --- a/ipn/ipnlocal/taildrop_test.go +++ b/ipn/ipnlocal/taildrop_test.go @@ -13,7 +13,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest/deptest" "tailscale.com/types/netmap" - "tailscale.com/util/mak" ) func TestFileTargets(t *testing.T) { @@ -23,7 +22,7 @@ func TestFileTargets(t *testing.T) { t.Errorf("before connect: got %q; want %q", got, want) } - b.netMap = new(netmap.NetworkMap) + b.currentNode().SetNetMap(new(netmap.NetworkMap)) _, err = b.FileTargets() if got, want := fmt.Sprint(err), "not connected to the tailnet"; got != want { t.Errorf("non-running netmap: got %q; want %q", got, want) @@ -44,16 +43,15 @@ func TestFileTargets(t *testing.T) { t.Fatalf("unexpected %d peers", len(got)) } - var peerMap map[tailcfg.NodeID]tailcfg.NodeView - mak.NonNil(&peerMap) - var nodeID tailcfg.NodeID - nodeID = 1234 - peer := &tailcfg.Node{ - ID: 1234, - Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + nm := &netmap.NetworkMap{ + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 1234, + Hostinfo: (&tailcfg.Hostinfo{OS: "tvOS"}).View(), + }).View(), + }, } - peerMap[nodeID] = peer.View() - b.peers = peerMap + b.currentNode().SetNetMap(nm) got, err = b.FileTargets() if err != nil { t.Fatal(err) diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 219a4c535..18145d1bb 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -116,11 +116,12 @@ func (b *LocalBackend) handleWebClientConn(c net.Conn) error { // for each of the local device's Tailscale IP addresses. This is needed to properly // route local traffic when using kernel networking mode. func (b *LocalBackend) updateWebClientListenersLocked() { - if b.netMap == nil { + nm := b.currentNode().NetMap() + if nm == nil { return } - addrs := b.netMap.GetAddresses() + addrs := nm.GetAddresses() for _, pfx := range addrs.All() { addrPort := netip.AddrPortFrom(pfx.Addr(), webClientPort) if _, ok := b.webClientListeners[addrPort]; ok {
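The test updates above all follow the same seeding pattern: state that used to be assigned directly to b.netMap, b.peers, and b.nodeByAddr is now fed in through a single SetNetMap call on the current node context. A minimal sketch of that pattern, assuming the newTestLocalBackend helper from local_test.go; the test name and peer values are placeholders:

func TestSetNetMapSeedingSketch(t *testing.T) {
	b := newTestLocalBackend(t)
	b.currentNode().SetNetMap(&netmap.NetworkMap{
		SelfNode: (&tailcfg.Node{Name: "example.ts.net"}).View(),
		Peers: []tailcfg.NodeView{
			(&tailcfg.Node{
				ID:        152,
				Addresses: []netip.Prefix{netip.MustParsePrefix("100.150.151.152/32")},
			}).View(),
		},
	})
	// SetNetMap rebuilds the peer index, so lookups work immediately.
	if _, ok := b.currentNode().PeerByID(152); !ok {
		t.Fatal("peer 152 not indexed after SetNetMap")
	}
}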