Mirror of https://github.com/tailscale/tailscale.git
commit 8049063d35 (parent: f2d949e2db)

wgengine/magicsock: rename discoEndpoint to just endpoint.

Signed-off-by: David Anderson <danderson@tailscale.com>
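The change below is mechanical: the `discoEndpoint` type and the `discoEndpointFor*` peerMap lookup helpers lose their `disco` prefix while keeping the same behavior. As a rough orientation aid, here is a minimal, self-contained Go sketch of the shape of the renamed API; the simplified string key types and the `main` demo are illustrative assumptions, not code from this repository (the real code uses tailcfg.NodeKey and tailcfg.DiscoKey).

package main

import "fmt"

// endpoint stands in for magicsock's renamed endpoint type
// (formerly discoEndpoint); fields are simplified for illustration.
type endpoint struct {
	publicKey string // node key (simplified)
	discoKey  string // disco key (simplified)
}

// peerInfo mirrors the shape of magicsock's peerInfo: a node record
// plus an optional *endpoint (nil when wireguard-go isn't talking to
// this peer).
type peerInfo struct {
	ep *endpoint
}

// peerMap mirrors the renamed lookup helpers: endpointForNodeKey and
// endpointForDiscoKey instead of discoEndpointForNodeKey and
// discoEndpointForDiscoKey.
type peerMap struct {
	byNodeKey  map[string]*peerInfo
	byDiscoKey map[string]*peerInfo
}

// endpointForNodeKey returns the endpoint for nk, or nil if nk is not
// known to us.
func (m *peerMap) endpointForNodeKey(nk string) (*endpoint, bool) {
	if pi, ok := m.byNodeKey[nk]; ok && pi.ep != nil {
		return pi.ep, true
	}
	return nil, false
}

// endpointForDiscoKey returns the endpoint for dk, or nil if dk is not
// known to us.
func (m *peerMap) endpointForDiscoKey(dk string) (*endpoint, bool) {
	if pi, ok := m.byDiscoKey[dk]; ok && pi.ep != nil {
		return pi.ep, true
	}
	return nil, false
}

func main() {
	ep := &endpoint{publicKey: "node1", discoKey: "disco1"}
	m := &peerMap{
		byNodeKey:  map[string]*peerInfo{"node1": {ep: ep}},
		byDiscoKey: map[string]*peerInfo{"disco1": {ep: ep}},
	}
	if got, ok := m.endpointForNodeKey("node1"); ok {
		fmt.Println("found endpoint for node1, disco key:", got.discoKey)
	}
}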
wgengine/magicsock/magicsock.go

@@ -75,8 +75,8 @@ func useDerpRoute() bool {
 // peerInfo is all the information magicsock tracks about a particular
 // peer.
 type peerInfo struct {
 node *tailcfg.Node // always present
-ep *discoEndpoint // optional, if wireguard-go isn't currently talking to this peer.
+ep *endpoint // optional, if wireguard-go isn't currently talking to this peer.
 // ipPorts is an inverted version of peerMap.byIPPort (below), so
 // that when we're deleting this node, we can rapidly find out the
 // keys that need deleting from peerMap.byIPPort without having to

@@ -137,9 +137,9 @@ func (m *peerMap) nodeForNodeKey(nk tailcfg.NodeKey) (n *tailcfg.Node, ok bool)
 return nil, false
 }

-// discoEndpointForDiscoKey returns the discoEndpoint for dk, or nil
+// endpointForDiscoKey returns the endpoint for dk, or nil
 // if dk is not known to us.
-func (m *peerMap) discoEndpointForDiscoKey(dk tailcfg.DiscoKey) (ep *discoEndpoint, ok bool) {
+func (m *peerMap) endpointForDiscoKey(dk tailcfg.DiscoKey) (ep *endpoint, ok bool) {
 if dk.IsZero() {
 return nil, false
 }

@@ -149,9 +149,9 @@ func (m *peerMap) discoEndpointForDiscoKey(dk tailcfg.DiscoKey) (ep *discoEndpoi
 return nil, false
 }

-// discoEndpointForNodeKey returns the discoEndpoint for nk, or nil if
+// endpointForNodeKey returns the endpoint for nk, or nil if
 // nk is not known to us.
-func (m *peerMap) discoEndpointForNodeKey(nk tailcfg.NodeKey) (ep *discoEndpoint, ok bool) {
+func (m *peerMap) endpointForNodeKey(nk tailcfg.NodeKey) (ep *endpoint, ok bool) {
 if nk.IsZero() {
 return nil, false
 }

@@ -161,17 +161,17 @@ func (m *peerMap) discoEndpointForNodeKey(nk tailcfg.NodeKey) (ep *discoEndpoint
 return nil, false
 }

-// discoEndpointForIPPort returns the discoEndpoint for the peer we
+// endpointForIPPort returns the endpoint for the peer we
 // believe to be at ipp, or nil if we don't know of any such peer.
-func (m *peerMap) discoEndpointForIPPort(ipp netaddr.IPPort) (ep *discoEndpoint, ok bool) {
+func (m *peerMap) endpointForIPPort(ipp netaddr.IPPort) (ep *endpoint, ok bool) {
 if info, ok := m.byIPPort[ipp]; ok && info.ep != nil {
 return info.ep, true
 }
 return nil, false
 }

-// forEachDiscoEndpoint invokes f on every discoEndpoint in m.
-func (m *peerMap) forEachDiscoEndpoint(f func(ep *discoEndpoint)) {
+// forEachDiscoEndpoint invokes f on every endpoint in m.
+func (m *peerMap) forEachDiscoEndpoint(f func(ep *endpoint)) {
 for _, pi := range m.byNodeKey {
 if pi.ep != nil {
 f(pi.ep)

@@ -186,10 +186,10 @@ func (m *peerMap) forEachNode(f func(n *tailcfg.Node)) {
 }
 }

-// upsertDiscoEndpoint stores discoEndpoint in the peerInfo for
+// upsertDiscoEndpoint stores endpoint in the peerInfo for
 // ep.publicKey, and updates indexes. m must already have a
 // tailcfg.Node for ep.publicKey.
-func (m *peerMap) upsertDiscoEndpoint(ep *discoEndpoint) {
+func (m *peerMap) upsertDiscoEndpoint(ep *endpoint) {
 pi := m.byNodeKey[ep.publicKey]
 if pi == nil {
 panic("can't have disco endpoint for unknown node")

@@ -237,7 +237,7 @@ func (m *peerMap) setDiscoKeyForIPPort(ipp netaddr.IPPort, dk tailcfg.DiscoKey)

 // deleteDiscoEndpoint deletes the peerInfo associated with ep, and
 // updates indexes.
-func (m *peerMap) deleteDiscoEndpoint(ep *discoEndpoint) {
+func (m *peerMap) deleteDiscoEndpoint(ep *endpoint) {
 if ep == nil {
 return
 }

@@ -390,7 +390,7 @@ type Conn struct {

 // onEndpointRefreshed are funcs to run (in their own goroutines)
 // when endpoints are refreshed.
-onEndpointRefreshed map[*discoEndpoint]func()
+onEndpointRefreshed map[*endpoint]func()

 // peerSet is the set of peers that are currently configured in
 // WireGuard. These are not used to filter inbound or outbound

@@ -923,7 +923,7 @@ func (c *Conn) SetNetInfoCallback(fn func(*tailcfg.NetInfo)) {
 func (c *Conn) LastRecvActivityOfDisco(dk tailcfg.DiscoKey) string {
 c.mu.Lock()
 defer c.mu.Unlock()
-de, ok := c.peerMap.discoEndpointForDiscoKey(dk)
+de, ok := c.peerMap.endpointForDiscoKey(dk)
 if !ok {
 return "never"
 }

@@ -955,7 +955,7 @@ func (c *Conn) Ping(peer *tailcfg.Node, res *ipnstate.PingResult, cb func(*ipnst
 }
 }

-de, ok := c.peerMap.discoEndpointForNodeKey(peer.Key)
+de, ok := c.peerMap.endpointForNodeKey(peer.Key)
 if !ok {
 node, ok := c.peerMap.nodeForNodeKey(peer.Key)
 if !ok {

@@ -977,7 +977,7 @@ func (c *Conn) Ping(peer *tailcfg.Node, res *ipnstate.PingResult, cb func(*ipnst
 return
 }

-de, ok = c.peerMap.discoEndpointForNodeKey(peer.Key)
+de, ok = c.peerMap.endpointForNodeKey(peer.Key)
 if !ok {
 res.Err = "internal error: failed to get endpoint for node key"
 cb(res)

@@ -1232,7 +1232,7 @@ func (c *Conn) Send(b []byte, ep conn.Endpoint) error {
 if c.networkDown() {
 return errNetworkDown
 }
-return ep.(*discoEndpoint).send(b)
+return ep.(*endpoint).send(b)
 }

 var errConnClosed = errors.New("Conn closed")

@@ -1663,7 +1663,7 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan
 //
 // This should be called whenever a packet arrives from e.
 func (c *Conn) noteRecvActivityFromEndpoint(e conn.Endpoint) {
-de, ok := e.(*discoEndpoint)
+de, ok := e.(*endpoint)
 if ok && c.noteRecvActivity != nil && de.isFirstRecvActivityInAwhile() {
 c.noteRecvActivity(de.discoKey)
 }

@@ -1721,7 +1721,7 @@ func (c *Conn) receiveIP(b []byte, ipp netaddr.IPPort, cache *ippEndpointCache)
 ep = cache.de
 } else {
 c.mu.Lock()
-de, ok := c.peerMap.discoEndpointForIPPort(ipp)
+de, ok := c.peerMap.endpointForIPPort(ipp)
 c.mu.Unlock()
 if !ok {
 return nil, false

@@ -1757,7 +1757,7 @@ func (c *connBind) receiveDERP(b []byte) (n int, ep conn.Endpoint, err error) {
 return 0, nil, net.ErrClosed
 }

-func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *discoEndpoint) {
+func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *endpoint) {
 if dm.copyBuf == nil {
 return 0, nil
 }
@@ -1779,7 +1779,7 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *di
 c.mu.Lock()
 nk := tailcfg.NodeKey(dm.src)
 var ok bool
-ep, ok = c.peerMap.discoEndpointForNodeKey(nk)
+ep, ok = c.peerMap.endpointForNodeKey(nk)
 if !ok {
 node, ok := c.peerMap.nodeForNodeKey(nk)
 if !ok {

@@ -1812,7 +1812,7 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *di
 // assume that ep != nil.
 c.mu.Lock()
 c.logf("magicsock: DERP packet received from idle peer %v; created=%v", dm.src.ShortString(), ep != nil)
-ep, ok = c.peerMap.discoEndpointForNodeKey(nk)
+ep, ok = c.peerMap.endpointForNodeKey(nk)
 if !ok {
 // There are a few edge cases where we can still end up
 // with a nil ep here. Among them are: the peer was

@@ -1942,7 +1942,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netaddr.IPPort) (isDiscoMsg bo

 needsRecvActivityCall := false
 isLazyCreate := false
-de, ok := c.peerMap.discoEndpointForDiscoKey(sender)
+de, ok := c.peerMap.endpointForDiscoKey(sender)
 if !ok {
 // We know about the node, but it doesn't currently have active WireGuard state.
 c.logf("magicsock: got disco message from idle peer, starting lazy conf for %v, %v", peerNode.Key.ShortString(), sender.ShortString())

@@ -1971,7 +1971,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netaddr.IPPort) (isDiscoMsg bo
 if c.closed || c.privateKey.IsZero() {
 return
 }
-de, ok = c.peerMap.discoEndpointForDiscoKey(sender)
+de, ok = c.peerMap.endpointForDiscoKey(sender)
 if !ok {
 if _, ok := c.peerMap.nodeForDiscoKey(sender); !ok {
 // They just disappeared while we'd released the lock.

@@ -2050,7 +2050,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netaddr.IPPort) (isDiscoMsg bo
 return
 }

-func (c *Conn) handlePingLocked(dm *disco.Ping, de *discoEndpoint, src netaddr.IPPort, sender tailcfg.DiscoKey, peerNode *tailcfg.Node) {
+func (c *Conn) handlePingLocked(dm *disco.Ping, de *endpoint, src netaddr.IPPort, sender tailcfg.DiscoKey, peerNode *tailcfg.Node) {
 if peerNode == nil {
 c.logf("magicsock: disco: [unexpected] ignoring ping from unknown peer Node")
 return

@@ -2081,14 +2081,14 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, de *discoEndpoint, src netaddr.I
 // flipping primary DERPs in the 0-30ms it takes to confirm our STUN endpoint.
 // If they do, traffic will just go over DERP for a bit longer until the next
 // discovery round.
-func (c *Conn) enqueueCallMeMaybe(derpAddr netaddr.IPPort, de *discoEndpoint) {
+func (c *Conn) enqueueCallMeMaybe(derpAddr netaddr.IPPort, de *endpoint) {
 c.mu.Lock()
 defer c.mu.Unlock()

 if !c.lastEndpointsTime.After(time.Now().Add(-endpointsFreshEnoughDuration)) {
 c.logf("magicsock: want call-me-maybe but endpoints stale; restunning")
 if c.onEndpointRefreshed == nil {
-c.onEndpointRefreshed = map[*discoEndpoint]func(){}
+c.onEndpointRefreshed = map[*endpoint]func(){}
 }
 c.onEndpointRefreshed[de] = func() {
 c.logf("magicsock: STUN done; sending call-me-maybe to %v %v", de.discoShort, de.publicKey.ShortString())

@@ -2117,7 +2117,7 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netaddr.IPPort, de *discoEndpoint) {
 //
 // c.mu must be held.
 func (c *Conn) setAddrToDiscoLocked(src netaddr.IPPort, newk tailcfg.DiscoKey) {
-oldEp, ok := c.peerMap.discoEndpointForIPPort(src)
+oldEp, ok := c.peerMap.endpointForIPPort(src)
 if !ok {
 c.logf("[v1] magicsock: disco: adding mapping of %v to %v", src, newk.ShortString())
 } else if oldEp.discoKey != newk {

@@ -2212,7 +2212,7 @@ func (c *Conn) SetPrivateKey(privateKey wgkey.Private) error {
 }

 if newKey.IsZero() {
-c.peerMap.forEachDiscoEndpoint(func(ep *discoEndpoint) {
+c.peerMap.forEachDiscoEndpoint(func(ep *endpoint) {
 ep.stopAndReset()
 })
 }

@@ -2303,7 +2303,7 @@ func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) {
 numNoDisco++
 continue
 }
-if ep, ok := c.peerMap.discoEndpointForDiscoKey(n.DiscoKey); ok && ep.publicKey == n.Key {
+if ep, ok := c.peerMap.endpointForDiscoKey(n.DiscoKey); ok && ep.publicKey == n.Key {
 ep.updateFromNode(n)
 } else if ep != nil {
 // Endpoint no longer belongs to the same node. We'll

@@ -2546,7 +2546,7 @@ func (c *Conn) Close() error {
 c.stopPeriodicReSTUNTimerLocked()
 c.portMapper.Close()

-c.peerMap.forEachDiscoEndpoint(func(ep *discoEndpoint) {
+c.peerMap.forEachDiscoEndpoint(func(ep *endpoint) {
 ep.stopAndReset()
 })

@@ -2821,7 +2821,7 @@ func (c *Conn) Rebind() {
 func (c *Conn) resetEndpointStates() {
 c.mu.Lock()
 defer c.mu.Unlock()
-c.peerMap.forEachDiscoEndpoint(func(ep *discoEndpoint) {
+c.peerMap.forEachDiscoEndpoint(func(ep *endpoint) {
 ep.noteConnectivityChange()
 })
 }

@@ -2843,8 +2843,6 @@ func packIPPort(ua netaddr.IPPort) []byte {

 // ParseEndpoint is called by WireGuard to connect to an endpoint.
 // endpointStr is a json-serialized wgcfg.Endpoints struct.
-// If those Endpoints contain an active discovery key, ParseEndpoint returns a discoEndpoint.
-// Otherwise it returns a legacy endpoint.
 func (c *Conn) ParseEndpoint(endpointStr string) (conn.Endpoint, error) {
 var endpoints wgcfg.Endpoints
 err := json.Unmarshal([]byte(endpointStr), &endpoints)

@@ -2866,7 +2864,7 @@ func (c *Conn) ParseEndpoint(endpointStr string) (conn.Endpoint, error) {
 c.logf("[unexpected] magicsock: ParseEndpoint: unknown node key=%s", pk.ShortString())
 return nil, fmt.Errorf("magicsock: ParseEndpoint: unknown peer %q", pk.ShortString())
 }
-de := &discoEndpoint{
+de := &endpoint{
 c: c,
 publicKey: tailcfg.NodeKey(pk), // peer public key (for WireGuard + DERP)
 wgEndpoint: endpointStr,

@@ -3180,7 +3178,7 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) {
 ps := &ipnstate.PeerStatus{InMagicSock: true}
 ps.Addrs = append(ps.Addrs, n.Endpoints...)
 ps.Relay = c.derpRegionCodeOfAddrLocked(n.DERP)
-if ep, ok := c.peerMap.discoEndpointForNodeKey(n.Key); ok {
+if ep, ok := c.peerMap.endpointForNodeKey(n.Key); ok {
 ep.populatePeerStatus(ps)
 }
 sb.AddPeer(key.Public(n.Key), ps)

@@ -3201,9 +3199,8 @@ func ippDebugString(ua netaddr.IPPort) string {

 // discoEndpoint is a wireguard/conn.Endpoint that picks the best
 // available path to communicate with a peer, based on network
-// conditions and what the peer supports. In particular, despite the
-// name, an endpoint can support DERP only.
-type discoEndpoint struct {
+// conditions and what the peer supports.
+type endpoint struct {
 // atomically accessed; declared first for alignment reasons
 lastRecv mono.Time
 numStopAndResetAtomic int64
@@ -3221,7 +3218,7 @@ type discoEndpoint struct {
 lastPingTime time.Time

 // mu protects all following fields.
-mu sync.Mutex // Lock ordering: Conn.mu, then discoEndpoint.mu
+mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu

 heartBeatTimer *time.Timer // nil when idle
 lastSend mono.Time // last time there was outgoing packets sent to this peer (from wireguard-go)

@@ -3245,7 +3242,7 @@ type pendingCLIPing struct {

 const (
 // sessionActiveTimeout is how long since the last activity we
-// try to keep an established discoEndpoint peering alive.
+// try to keep an established endpoint peering alive.
 // It's also the idle time at which we stop doing STUN queries to
 // keep NAT mappings alive.
 sessionActiveTimeout = 2 * time.Minute

@@ -3291,10 +3288,10 @@ type pendingCLIPing struct {
 )

 // endpointState is some state and history for a specific endpoint of
-// a discoEndpoint. (The subject is the discoEndpoint.endpointState
+// a endpoint. (The subject is the endpoint.endpointState
 // map key)
 type endpointState struct {
-// all fields guarded by discoEndpoint.mu
+// all fields guarded by endpoint.mu

 // lastPing is the last (outgoing) ping time.
 lastPing mono.Time

@@ -3316,7 +3313,7 @@ type endpointState struct {
 }

 // indexSentinelDeleted is the temporary value that endpointState.index takes while
-// a discoEndpoint's endpoints are being updated from a new network map.
+// a endpoint's endpoints are being updated from a new network map.
 const indexSentinelDeleted = -1

 // shouldDeleteLocked reports whether we should delete this endpoint.

@@ -3333,7 +3330,7 @@ func (st *endpointState) shouldDeleteLocked() bool {
 }
 }

-func (de *discoEndpoint) deleteEndpointLocked(ep netaddr.IPPort) {
+func (de *endpoint) deleteEndpointLocked(ep netaddr.IPPort) {
 delete(de.endpointState, ep)
 if de.bestAddr.IPPort == ep {
 de.bestAddr = addrLatency{}

@@ -3360,7 +3357,7 @@ type sentPing struct {
 // initFakeUDPAddr populates fakeWGAddr with a globally unique fake UDPAddr.
 // The current implementation just uses the pointer value of de jammed into an IPv6
 // address, but it could also be, say, a counter.
-func (de *discoEndpoint) initFakeUDPAddr() {
+func (de *endpoint) initFakeUDPAddr() {
 var addr [16]byte
 addr[0] = 0xfd
 addr[1] = 0x00

@@ -3371,7 +3368,7 @@ func (de *discoEndpoint) initFakeUDPAddr() {
 // isFirstRecvActivityInAwhile notes that receive activity has occurred for this
 // endpoint and reports whether it's been at least 10 seconds since the last
 // receive activity (including having never received from this peer before).
-func (de *discoEndpoint) isFirstRecvActivityInAwhile() bool {
+func (de *endpoint) isFirstRecvActivityInAwhile() bool {
 now := mono.Now()
 elapsed := now.Sub(de.lastRecv.LoadAtomic())
 if elapsed > 10*time.Second {

@@ -3384,23 +3381,23 @@ func (de *discoEndpoint) isFirstRecvActivityInAwhile() bool {
 // String exists purely so wireguard-go internals can log.Printf("%v")
 // its internal conn.Endpoints and we don't end up with data races
 // from fmt (via log) reading mutex fields and such.
-func (de *discoEndpoint) String() string {
-return fmt.Sprintf("magicsock.discoEndpoint{%v, %v}", de.publicKey.ShortString(), de.discoShort)
+func (de *endpoint) String() string {
+return fmt.Sprintf("magicsock.endpoint{%v, %v}", de.publicKey.ShortString(), de.discoShort)
 }

-func (de *discoEndpoint) ClearSrc() {}
-func (de *discoEndpoint) SrcToString() string { panic("unused") } // unused by wireguard-go
-func (de *discoEndpoint) SrcIP() net.IP { panic("unused") } // unused by wireguard-go
-func (de *discoEndpoint) DstToString() string { return de.wgEndpoint }
-func (de *discoEndpoint) DstIP() net.IP { panic("unused") }
-func (de *discoEndpoint) DstToBytes() []byte { return packIPPort(de.fakeWGAddr) }
+func (de *endpoint) ClearSrc() {}
+func (de *endpoint) SrcToString() string { panic("unused") } // unused by wireguard-go
+func (de *endpoint) SrcIP() net.IP { panic("unused") } // unused by wireguard-go
+func (de *endpoint) DstToString() string { return de.wgEndpoint }
+func (de *endpoint) DstIP() net.IP { panic("unused") }
+func (de *endpoint) DstToBytes() []byte { return packIPPort(de.fakeWGAddr) }

 // canP2P reports whether this endpoint understands the disco protocol
 // and is expected to speak it.
 //
 // As of 2021-08-25, only a few hundred pre-0.100 clients understand
 // DERP but not disco, so this returns false very rarely.
-func (de *discoEndpoint) canP2P() bool {
+func (de *endpoint) canP2P() bool {
 return !de.discoKey.IsZero()
 }

@@ -3409,7 +3406,7 @@ func (de *discoEndpoint) canP2P() bool {
 // addr may be non-zero.
 //
 // de.mu must be held.
-func (de *discoEndpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr netaddr.IPPort) {
+func (de *endpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr netaddr.IPPort) {
 udpAddr = de.bestAddr.IPPort
 if udpAddr.IsZero() || now.After(de.trustBestAddrUntil) {
 // We had a bestAddr but it expired so send both to it

@@ -3421,7 +3418,7 @@ func (de *discoEndpoint) addrForSendLocked(now mono.Time) (udpAddr, derpAddr net

 // heartbeat is called every heartbeatInterval to keep the best UDP path alive,
 // or kick off discovery of other paths.
-func (de *discoEndpoint) heartbeat() {
+func (de *endpoint) heartbeat() {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3461,7 +3458,7 @@ func (de *discoEndpoint) heartbeat() {
 // a better path.
 //
 // de.mu must be held.
-func (de *discoEndpoint) wantFullPingLocked(now mono.Time) bool {
+func (de *endpoint) wantFullPingLocked(now mono.Time) bool {
 if !de.canP2P() {
 return false
 }

@@ -3480,7 +3477,7 @@ func (de *discoEndpoint) wantFullPingLocked(now mono.Time) bool {
 return false
 }

-func (de *discoEndpoint) noteActiveLocked() {
+func (de *endpoint) noteActiveLocked() {
 de.lastSend = mono.Now()
 if de.heartBeatTimer == nil && de.canP2P() {
 de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat)

@@ -3489,7 +3486,7 @@ func (de *discoEndpoint) noteActiveLocked() {

 // cliPing starts a ping for the "tailscale ping" command. res is value to call cb with,
 // already partially filled.
-func (de *discoEndpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
+func (de *endpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3514,7 +3511,7 @@ func (de *discoEndpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.Pin
 de.noteActiveLocked()
 }

-func (de *discoEndpoint) send(b []byte) error {
+func (de *endpoint) send(b []byte) error {
 now := mono.Now()

 de.mu.Lock()

@@ -3541,7 +3538,7 @@ func (de *discoEndpoint) send(b []byte) error {
 return err
 }

-func (de *discoEndpoint) pingTimeout(txid stun.TxID) {
+func (de *endpoint) pingTimeout(txid stun.TxID) {
 de.mu.Lock()
 defer de.mu.Unlock()
 sp, ok := de.sentPing[txid]
@@ -3556,7 +3553,7 @@ func (de *discoEndpoint) pingTimeout(txid stun.TxID) {

 // forgetPing is called by a timer when a ping either fails to send or
 // has taken too long to get a pong reply.
-func (de *discoEndpoint) forgetPing(txid stun.TxID) {
+func (de *endpoint) forgetPing(txid stun.TxID) {
 de.mu.Lock()
 defer de.mu.Unlock()
 if sp, ok := de.sentPing[txid]; ok {

@@ -3564,7 +3561,7 @@ func (de *discoEndpoint) forgetPing(txid stun.TxID) {
 }
 }

-func (de *discoEndpoint) removeSentPingLocked(txid stun.TxID, sp sentPing) {
+func (de *endpoint) removeSentPingLocked(txid stun.TxID, sp sentPing) {
 // Stop the timer for the case where sendPing failed to write to UDP.
 // In the case of a timer already having fired, this is a no-op:
 sp.timer.Stop()

@@ -3575,7 +3572,7 @@ func (de *discoEndpoint) removeSentPingLocked(txid stun.TxID, sp sentPing) {
 //
 // The caller (startPingLocked) should've already been recorded the ping in
 // sentPing and set up the timer.
-func (de *discoEndpoint) sendDiscoPing(ep netaddr.IPPort, txid stun.TxID, logLevel discoLogLevel) {
+func (de *endpoint) sendDiscoPing(ep netaddr.IPPort, txid stun.TxID, logLevel discoLogLevel) {
 sent, _ := de.sendDiscoMessage(ep, &disco.Ping{TxID: [12]byte(txid)}, logLevel)
 if !sent {
 de.forgetPing(txid)

@@ -3600,7 +3597,7 @@ func (de *discoEndpoint) sendDiscoPing(ep netaddr.IPPort, txid stun.TxID, logLev
 pingCLI
 )

-func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now mono.Time, purpose discoPingPurpose) {
+func (de *endpoint) startPingLocked(ep netaddr.IPPort, now mono.Time, purpose discoPingPurpose) {
 if !de.canP2P() {
 panic("tried to disco ping a peer that can't disco")
 }

@@ -3629,7 +3626,7 @@ func (de *discoEndpoint) startPingLocked(ep netaddr.IPPort, now mono.Time, purpo
 go de.sendDiscoPing(ep, txid, logLevel)
 }

-func (de *discoEndpoint) sendPingsLocked(now mono.Time, sendCallMeMaybe bool) {
+func (de *endpoint) sendPingsLocked(now mono.Time, sendCallMeMaybe bool) {
 de.lastFullPing = now
 var sentAny bool
 for ep, st := range de.endpointState {

@@ -3661,11 +3658,11 @@ func (de *discoEndpoint) sendPingsLocked(now mono.Time, sendCallMeMaybe bool) {
 }
 }

-func (de *discoEndpoint) sendDiscoMessage(dst netaddr.IPPort, dm disco.Message, logLevel discoLogLevel) (sent bool, err error) {
+func (de *endpoint) sendDiscoMessage(dst netaddr.IPPort, dm disco.Message, logLevel discoLogLevel) (sent bool, err error) {
 return de.c.sendDiscoMessage(dst, de.publicKey, de.discoKey, dm, logLevel)
 }

-func (de *discoEndpoint) updateFromNode(n *tailcfg.Node) {
+func (de *endpoint) updateFromNode(n *tailcfg.Node) {
 if n == nil {
 panic("nil node when updating disco ep")
 }

@@ -3712,7 +3709,7 @@ func (de *discoEndpoint) updateFromNode(n *tailcfg.Node) {
 //
 // This is called once we've already verified that we got a valid
 // discovery message from de via ep.
-func (de *discoEndpoint) addCandidateEndpoint(ep netaddr.IPPort) {
+func (de *endpoint) addCandidateEndpoint(ep netaddr.IPPort) {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3746,7 +3743,7 @@ func (de *discoEndpoint) addCandidateEndpoint(ep netaddr.IPPort) {
 // noteConnectivityChange is called when connectivity changes enough
 // that we should question our earlier assumptions about which paths
 // work.
-func (de *discoEndpoint) noteConnectivityChange() {
+func (de *endpoint) noteConnectivityChange() {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3755,7 +3752,7 @@ func (de *discoEndpoint) noteConnectivityChange() {

 // handlePongConnLocked handles a Pong message (a reply to an earlier ping).
 // It should be called with the Conn.mu held.
-func (de *discoEndpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort) {
+func (de *endpoint) handlePongConnLocked(m *disco.Pong, src netaddr.IPPort) {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3849,7 +3846,7 @@ func betterAddr(a, b addrLatency) bool {
 return a.latency < b.latency
 }

-// discoEndpoint.mu must be held.
+// endpoint.mu must be held.
 func (st *endpointState) addPongReplyLocked(r pongReply) {
 if n := len(st.recentPongs); n < pongHistoryCount {
 st.recentPong = uint16(n)

@@ -3868,7 +3865,7 @@ func (st *endpointState) addPongReplyLocked(r pongReply) {
 // DERP. The contract for use of this message is that the peer has
 // already sent to us via UDP, so their stateful firewall should be
 // open. Now we can Ping back and make it through.
-func (de *discoEndpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
+func (de *endpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
 if !de.canP2P() {
 // How did we receive a disco message from a peer that can't disco?
 panic("got call-me-maybe from peer with no discokey")

@@ -3929,7 +3926,7 @@ func (de *discoEndpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
 de.sendPingsLocked(mono.Now(), false)
 }

-func (de *discoEndpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {
+func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3950,7 +3947,7 @@ func (de *discoEndpoint) populatePeerStatus(ps *ipnstate.PeerStatus) {
 // It's called when a discovery endpoint is no longer present in the
 // NetworkMap, or when magicsock is transitioning from running to
 // stopped state (via SetPrivateKey(zero))
-func (de *discoEndpoint) stopAndReset() {
+func (de *endpoint) stopAndReset() {
 atomic.AddInt64(&de.numStopAndResetAtomic, 1)
 de.mu.Lock()
 defer de.mu.Unlock()

@@ -3978,7 +3975,7 @@ func (de *discoEndpoint) stopAndReset() {
 de.pendingCLIPings = nil
 }

-func (de *discoEndpoint) numStopAndReset() int64 {
+func (de *endpoint) numStopAndReset() int64 {
 return atomic.LoadInt64(&de.numStopAndResetAtomic)
 }

@@ -3990,5 +3987,5 @@ func derpStr(s string) string { return strings.ReplaceAll(s, "127.3.3.40:", "der
 type ippEndpointCache struct {
 ipp netaddr.IPPort
 gen int64
-de *discoEndpoint
+de *endpoint
 }
wgengine/magicsock/magicsock_test.go

@@ -1051,7 +1051,7 @@ func TestDiscoMessage(t *testing.T) {
 DiscoKey: peer1Pub,
 }
 c.peerMap.upsertNode(n)
-c.peerMap.upsertDiscoEndpoint(&discoEndpoint{
+c.peerMap.upsertDiscoEndpoint(&endpoint{
 publicKey: n.Key,
 discoKey: n.DiscoKey,
 })

@@ -1071,12 +1071,12 @@ func TestDiscoMessage(t *testing.T) {
 }
 }

-// tests that having a discoEndpoint.String prevents wireguard-go's
+// tests that having a endpoint.String prevents wireguard-go's
 // log.Printf("%v") of its conn.Endpoint values from using reflect to
 // walk into read mutex while they're being used and then causing data
 // races.
 func TestDiscoStringLogRace(t *testing.T) {
-de := new(discoEndpoint)
+de := new(endpoint)
 var wg sync.WaitGroup
 wg.Add(2)
 go func() {

@@ -1091,10 +1091,10 @@ func TestDiscoStringLogRace(t *testing.T) {
 }

 func Test32bitAlignment(t *testing.T) {
-var de discoEndpoint
+var de endpoint

 if off := unsafe.Offsetof(de.lastRecv); off%8 != 0 {
-t.Fatalf("discoEndpoint.lastRecv is not 8-byte aligned")
+t.Fatalf("endpoint.lastRecv is not 8-byte aligned")
 }

 if !de.isFirstRecvActivityInAwhile() { // verify this doesn't panic on 32-bit

@@ -1350,7 +1350,7 @@ func TestSetNetworkMapChangingNodeKey(t *testing.T) {
 })
 }

-de, ok := conn.peerMap.discoEndpointForDiscoKey(discoKey)
+de, ok := conn.peerMap.endpointForDiscoKey(discoKey)
 if ok && de.publicKey != nodeKey2 {
 t.Fatalf("discoEndpoint public key = %q; want %q", de.publicKey[:], nodeKey2[:])
 }
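One hunk above also shows that the renamed type keeps the wireguard-go conn.Endpoint methods (ClearSrc, SrcToString, SrcIP, DstToString, DstIP, DstToBytes). As a hedged illustration of that pattern, here is a self-contained sketch using a local stand-in interface rather than the real wireguard-go conn package, whose import path and exact signatures are not shown in this diff; the toy method bodies are also simplifications (the real unused methods panic).

package main

import (
	"fmt"
	"net"
)

// connEndpoint is a local stand-in for wireguard-go's conn.Endpoint
// interface, listing only the methods visible in the diff above. This
// is an assumption for illustration, not the real interface.
type connEndpoint interface {
	ClearSrc()
	SrcToString() string
	SrcIP() net.IP
	DstToString() string
	DstIP() net.IP
	DstToBytes() []byte
}

// endpoint is a toy version of the renamed magicsock type.
type endpoint struct {
	wgEndpoint string
}

func (de *endpoint) ClearSrc()           {}
func (de *endpoint) SrcToString() string { return "" }
func (de *endpoint) SrcIP() net.IP       { return nil }
func (de *endpoint) DstToString() string { return de.wgEndpoint }
func (de *endpoint) DstIP() net.IP       { return nil }
func (de *endpoint) DstToBytes() []byte  { return []byte(de.wgEndpoint) }

// Compile-time check that *endpoint satisfies the stand-in interface,
// mirroring how the renamed type keeps satisfying conn.Endpoint after
// the discoEndpoint -> endpoint rename.
var _ connEndpoint = (*endpoint)(nil)

func main() {
	de := &endpoint{wgEndpoint: "192.0.2.1:41641"}
	fmt.Println(de.DstToString())
}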