2023-01-27 21:37:20 +00:00
|
|
|
// Copyright (c) Tailscale Inc & AUTHORS
|
|
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
2020-06-15 18:58:10 +00:00
|
|
|
|
|
|
|
package derphttp
|
|
|
|
|
|
|
|
import (
|
2021-02-12 18:58:43 +00:00
|
|
|
"context"
|
2023-08-16 02:35:24 +00:00
|
|
|
"net/netip"
|
2020-06-15 18:58:10 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"tailscale.com/derp"
|
|
|
|
"tailscale.com/types/key"
|
2021-02-12 18:58:43 +00:00
|
|
|
"tailscale.com/types/logger"
|
2020-06-15 18:58:10 +00:00
|
|
|
)
|
|
|
|
|
2021-02-12 18:58:43 +00:00
|
|
|
// RunWatchConnectionLoop loops until ctx is done, sending WatchConnectionChanges and subscribing to
// connection changes.
//
// If the server's public key is ignoreServerKey, RunWatchConnectionLoop returns.
//
// Otherwise, the add and remove funcs are called as clients come & go.
//
// infoLogf, if non-nil, is the logger to write periodic status
// updates about how many peers are on the server. Error log output is
// set to the c's logger, regardless of infoLogf's value.
//
// To force RunWatchConnectionLoop to return quickly, its ctx needs to
// be closed, and c itself needs to be closed.
func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key.NodePublic, infoLogf logger.Logf, add func(key.NodePublic, netip.AddrPort), remove func(key.NodePublic)) {
	if infoLogf == nil {
		infoLogf = logger.Discard
	}
	logf := c.logf
	const retryInterval = 5 * time.Second
	const statusInterval = 10 * time.Second
	var (
		// mu guards present and loggedConnected; the add/remove
		// callbacks themselves are invoked without holding mu.
		mu sync.Mutex
		// present is the set of peers the server has told us are connected.
		present = map[key.NodePublic]bool{}
		// loggedConnected records whether the one-time "connected" line
		// has been written via logConnectedLocked.
		loggedConnected = false
	)
	// clear drops all tracked peers, calling remove for each, so that a
	// reconnect starts from a clean slate. (Note: this local shadows the
	// Go 1.21 clear builtin within this function.)
	clear := func() {
		mu.Lock()
		defer mu.Unlock()
		if len(present) == 0 {
			return
		}
		logf("reconnected; clearing %d forwarding mappings", len(present))
		for k := range present {
			remove(k)
		}
		present = map[key.NodePublic]bool{}
	}
	// lastConnGen is the connection generation from the previous RecvDetail;
	// a change means the underlying connection was re-established.
	lastConnGen := 0
	lastStatus := c.clock.Now()
	// logConnectedLocked writes the one-time "connected" status line.
	// mu must be held.
	logConnectedLocked := func() {
		if loggedConnected {
			return
		}
		infoLogf("connected; %d peers", len(present))
		loggedConnected = true
	}

	// After the initial flood of peerPresent messages goes quiet for
	// logConnectedDelay, assume the flood is done and log the peer count.
	const logConnectedDelay = 200 * time.Millisecond
	// Fallback: log "connected" after 2s even if no peer messages arrive
	// at all (e.g. an empty server).
	timer := c.clock.AfterFunc(2*time.Second, func() {
		mu.Lock()
		defer mu.Unlock()
		logConnectedLocked()
	})
	defer timer.Stop()

	// updatePeer invokes the add or remove callback for peer k and updates
	// the present set. Callbacks run before mu is taken, so they must not
	// call back into this loop's state. For removals, ipPort is the zero
	// netip.AddrPort.
	updatePeer := func(k key.NodePublic, ipPort netip.AddrPort, isPresent bool) {
		if isPresent {
			add(k, ipPort)
		} else {
			remove(k)
		}

		mu.Lock()
		defer mu.Unlock()
		if isPresent {
			present[k] = true
			if !loggedConnected {
				// Still in (or possibly in) the initial flood; push the
				// quiet-period timer out again.
				timer.Reset(logConnectedDelay)
			}
		} else {
			// If we got a peerGone message, that means the initial connection's
			// flood of peerPresent messages is done, so we can log already:
			logConnectedLocked()
			delete(present, k)
		}
	}

	// sleep waits d, returning early if ctx is done. Uses c.clock so
	// tests can fake time.
	sleep := func(d time.Duration) {
		t, tChannel := c.clock.NewTimer(d)
		select {
		case <-ctx.Done():
			t.Stop()
		case <-tChannel:
		}
	}

	// Outer loop: (re)subscribe to connection changes, retrying on error
	// until ctx is canceled.
	for ctx.Err() == nil {
		err := c.WatchConnectionChanges()
		if err != nil {
			clear()
			logf("WatchConnectionChanges: %v", err)
			sleep(retryInterval)
			continue
		}

		// A server watching itself would loop traffic back; bail out.
		if c.ServerPublicKey() == ignoreServerKey {
			logf("detected self-connect; ignoring host")
			return
		}
		// Inner loop: consume messages until a receive error forces a
		// reconnect via the outer loop.
		for {
			m, connGen, err := c.RecvDetail()
			if err != nil {
				clear()
				logf("Recv: %v", err)
				sleep(retryInterval)
				break
			}
			if connGen != lastConnGen {
				// New underlying connection: previously-known peers are stale.
				lastConnGen = connGen
				clear()
			}
			switch m := m.(type) {
			case derp.PeerPresentMessage:
				updatePeer(m.Key, m.IPPort, true)
			case derp.PeerGoneMessage:
				switch m.Reason {
				case derp.PeerGoneReasonDisconnected:
					// Normal case, log nothing
				case derp.PeerGoneReasonNotHere:
					logf("Recv: peer %s not connected to %s",
						key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString())
				default:
					logf("Recv: peer %s not at server %s for unknown reason %v",
						key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString(), m.Reason)
				}
				updatePeer(key.NodePublic(m.Peer), netip.AddrPort{}, false)
			default:
				// Ignore message types we don't handle; skip the status
				// check below since nothing changed.
				continue
			}
			// Periodic peer-count status line, at most every statusInterval.
			if now := c.clock.Now(); now.Sub(lastStatus) > statusInterval {
				lastStatus = now
				infoLogf("%d peers", len(present))
			}
		}
	}
}
|