// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
import (
	"context"
	"sync"
	"time"

	"tailscale.com/derp"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
)
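// retryInterval is how long RunWatchConnectionLoop waits after a failed
// connect or a receive error before retrying.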
var retryInterval = 5 * time.Second
// testHookWatchLookConnectResult, if non-nil for tests, is called by RunWatchConnectionLoop
// with the connect result. If it returns false, the loop ends.
var testHookWatchLookConnectResult func(connectError error, wasSelfConnect bool) (keepRunning bool)
// RunWatchConnectionLoop loops until ctx is done, sending
// WatchConnectionChanges and subscribing to connection changes.
//
// If the server's public key is ignoreServerKey, RunWatchConnectionLoop
// returns.
//
// Otherwise, the add and remove funcs are called as clients come & go.
// Note that add is called for every new connection and remove is only
// called for the final disconnection. See https://github.com/tailscale/tailscale/issues/13566.
// This behavior will likely change. Callers should do their own accounting
// and dup suppression as needed; see the sketch after this function.
//
// infoLogf, if non-nil, is the logger to write periodic status updates about
// how many peers are on the server. Error log output is set to c's logger,
// regardless of infoLogf's value.
//
// To force RunWatchConnectionLoop to return quickly, its ctx needs to be
// closed, and c itself needs to be closed.
//
// It is a fatal error to call this on an already-started Client without having
// initialized Client.WatchConnectionChanges to true.
//
// If the DERP connection breaks and reconnects, remove will be called for all
// previously seen peers, with Reason type PeerGoneReasonMeshConnBroke. Those
// clients are likely still connected and their add message will appear after
// reconnect.
func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key.NodePublic, infoLogf logger.Logf, add func(derp.PeerPresentMessage), remove func(derp.PeerGoneMessage)) {
	if !c.WatchConnectionChanges {
		if c.isStarted() {
			panic("invalid use of RunWatchConnectionLoop on already-started Client without setting Client.WatchConnectionChanges")
		}
		c.WatchConnectionChanges = true
	}
	if infoLogf == nil {
		infoLogf = logger.Discard
	}
	logf := c.logf
	const statusInterval = 10 * time.Second
	var (
		mu              sync.Mutex
		present         = map[key.NodePublic]bool{}
		loggedConnected = false
	)
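	// clear flushes the set of present peers, synthesizing a
	// PeerGoneMessage for each one; it runs when the connection to the
	// server breaks or is replaced by a new generation.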
	clear := func() {
		mu.Lock()
		defer mu.Unlock()
		if len(present) == 0 {
			return
		}
		logf("reconnected; clearing %d forwarding mappings", len(present))
		for k := range present {
			remove(derp.PeerGoneMessage{Peer: k, Reason: derp.PeerGoneReasonMeshConnBroke})
		}
		present = map[key.NodePublic]bool{}
	}
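	// lastConnGen tracks the connection generation reported by
	// RecvDetail; a change means the underlying connection was
	// re-established, so the present set is stale.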
	lastConnGen := 0
	lastStatus := c.clock.Now()
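	// logConnectedLocked logs the initial "connected" status once.
	// mu must be held.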
	logConnectedLocked := func() {
		if loggedConnected {
			return
		}
		infoLogf("connected; %d peers", len(present))
		loggedConnected = true
	}
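	// Defer the "connected" log briefly so the initial flood of
	// peer-present messages has (probably) arrived, making the first
	// reported peer count meaningful.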
	const logConnectedDelay = 200 * time.Millisecond
	timer := c.clock.AfterFunc(2*time.Second, func() {
		mu.Lock()
		defer mu.Unlock()
		logConnectedLocked()
	})
	defer timer.Stop()
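	// updatePeer records a presence change for peer k and manages the
	// delayed "connected" log.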
	updatePeer := func(k key.NodePublic, isPresent bool) {
		mu.Lock()
		defer mu.Unlock()
		if isPresent {
			present[k] = true
			if !loggedConnected {
				timer.Reset(logConnectedDelay)
			}
		} else {
			// If we got a peerGone message, that means the initial connection's
			// flood of peerPresent messages is done, so we can log already:
			logConnectedLocked()
			delete(present, k)
		}
	}
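	// sleep waits d, returning early if ctx is canceled first.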
	sleep := func(d time.Duration) {
		t, tChannel := c.clock.NewTimer(d)
		select {
		case <-ctx.Done():
			t.Stop()
		case <-tChannel:
		}
	}
	for ctx.Err() == nil {
		// Make sure we're connected before calling c.ServerPublicKey.
		_, _, err := c.connect(ctx, "RunWatchConnectionLoop")
		if err != nil {
			if f := testHookWatchLookConnectResult; f != nil && !f(err, false) {
				return
			}
			logf("mesh connect: %v", err)
			sleep(retryInterval)
			continue
		}
		selfConnect := c.ServerPublicKey() == ignoreServerKey
		if f := testHookWatchLookConnectResult; f != nil && !f(err, selfConnect) {
			return
		}
		if selfConnect {
			logf("detected self-connect; ignoring host")
			return
		}
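		// Receive and fan out messages until the connection breaks,
		// then reconnect via the outer loop.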
		for {
			m, connGen, err := c.RecvDetail()
			if err != nil {
				clear()
				logf("Recv: %v", err)
				sleep(retryInterval)
				break
			}
			if connGen != lastConnGen {
				lastConnGen = connGen
				clear()
			}
			switch m := m.(type) {
			case derp.PeerPresentMessage:
				add(m)
				updatePeer(m.Key, true)
			case derp.PeerGoneMessage:
				switch m.Reason {
				case derp.PeerGoneReasonDisconnected:
					// Normal case, log nothing
				case derp.PeerGoneReasonNotHere:
					logf("Recv: peer %s not connected to %s",
						key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString())
				default:
					logf("Recv: peer %s not at server %s for unknown reason %v",
						key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString(), m.Reason)
				}
				remove(m)
				updatePeer(m.Peer, false)
			default:
				continue
			}
			if now := c.clock.Now(); now.Sub(lastStatus) > statusInterval {
				lastStatus = now
				infoLogf("%d peers", len(present))
			}
		}
	}
}
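
// The sketch below is a minimal, hypothetical example (not part of this
// package's API) of how a caller might consume RunWatchConnectionLoop
// while doing its own accounting and dup suppression, as the doc comment
// above recommends. The names watchPeersExample and seen are invented
// for illustration.
func watchPeersExample(ctx context.Context, c *Client, ignoreServerKey key.NodePublic, infoLogf logger.Logf) {
	var (
		mu   sync.Mutex
		seen = map[key.NodePublic]bool{} // peers already reported present
	)
	add := func(m derp.PeerPresentMessage) {
		mu.Lock()
		defer mu.Unlock()
		if seen[m.Key] {
			return // duplicate add for a still-present peer; suppress
		}
		seen[m.Key] = true
	}
	remove := func(m derp.PeerGoneMessage) {
		mu.Lock()
		defer mu.Unlock()
		// A PeerGoneReasonMeshConnBroke removal is synthetic; the peer
		// will usually reappear via add once the connection is back.
		delete(seen, m.Peer)
	}
	// Blocks until ctx is done (close c as well to unblock quickly),
	// or returns early on a self-connect to ignoreServerKey.
	c.RunWatchConnectionLoop(ctx, ignoreServerKey, infoLogf, add, remove)
}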