package notifier

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/puzpuzpuz/xsync/v3"
	"github.com/rs/zerolog/log"
	"github.com/sasha-s/go-deadlock"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
	"tailscale.com/util/set"
)

var (
	debugDeadlock        = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK")
	debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT")
)
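
// init wires the envknob-driven debug settings into go-deadlock before the
// package is used; deadlock detection stays disabled unless
// HEADSCALE_DEBUG_DEADLOCK is set.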
func init() {
	deadlock.Opts.Disable = !debugDeadlock
	if debugDeadlock {
		deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout()
		deadlock.Opts.PrintAllCurrentGoroutines = true
	}
}
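
// Notifier keeps track of the update channel for each connected node and
// fans state updates out to them, coalescing frequent peer changes through
// a batcher.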
type Notifier struct {
	l         deadlock.Mutex
	nodes     map[types.NodeID]chan<- types.StateUpdate
	connected *xsync.MapOf[types.NodeID, bool]
	b         *batcher
	cfg       *types.Config
	closed    bool
}
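
// NewNotifier returns a Notifier ready for use and starts the background
// batcher goroutine that coalesces frequent peer changes.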
func NewNotifier(cfg *types.Config) *Notifier {
	n := &Notifier{
		nodes:     make(map[types.NodeID]chan<- types.StateUpdate),
		connected: xsync.NewMapOf[types.NodeID, bool](),
		cfg:       cfg,
		closed:    false,
	}
	b := newBatcher(cfg.Tuning.BatchChangeDelay, n)
	n.b = b

	go b.doWork()

	return n
}
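
// A minimal usage sketch (assuming cfg, ctx, nodeID, and update exist in
// the caller):
//
//	notifier := NewNotifier(cfg)
//	updates := make(chan types.StateUpdate, 1)
//	notifier.AddNode(nodeID, updates)
//	notifier.NotifyAll(ctx, update)      // fans out via the batcher
//	notifier.RemoveNode(nodeID, updates) // reports whether it removed
//	notifier.Close()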

// Close stops the batcher and closes all channels.
func (n *Notifier) Close() {
	notifierWaitersForLock.WithLabelValues("lock", "close").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "close").Dec()

	n.closed = true
	n.b.close()

	for _, c := range n.nodes {
		close(c)
	}
}

func (n *Notifier) tracef(nID types.NodeID, msg string, args ...any) {
	log.Trace().
		Uint64("node.id", nID.Uint64()).
		Int("open_chans", len(n.nodes)).Msgf(msg, args...)
}
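
// AddNode registers the update channel for a node, closing and replacing
// any channel left over from a previous connection, and marks the node
// as connected.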
func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) {
	start := time.Now()
	notifierWaitersForLock.WithLabelValues("lock", "add").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "add").Dec()
	notifierWaitForLock.WithLabelValues("add").Observe(time.Since(start).Seconds())

	if n.closed {
		return
	}

	// If a channel exists, it means the node has opened a new
	// connection. Close the old channel and replace it.
	if curr, ok := n.nodes[nodeID]; ok {
		n.tracef(nodeID, "channel present, closing and replacing")
		close(curr)
	}

	n.nodes[nodeID] = c
	n.connected.Store(nodeID, true)

	n.tracef(nodeID, "added new channel")
	notifierNodeUpdateChans.Inc()
}

// RemoveNode removes a node and a given channel from the notifier.
// It checks that the channel is the same as the one currently being
// updated and ignores the removal if it is not.
// RemoveNode reports if the node/chan was removed.
func (n *Notifier) RemoveNode(nodeID types.NodeID, c chan<- types.StateUpdate) bool {
	start := time.Now()
	notifierWaitersForLock.WithLabelValues("lock", "remove").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "remove").Dec()
	notifierWaitForLock.WithLabelValues("remove").Observe(time.Since(start).Seconds())

	if n.closed {
		return true
	}

	if len(n.nodes) == 0 {
		return true
	}

	// If the channel exists but does not belong
	// to the caller, ignore the removal.
	if curr, ok := n.nodes[nodeID]; ok {
		if curr != c {
			n.tracef(nodeID, "channel has been replaced, not removing")

			return false
		}
	}

	delete(n.nodes, nodeID)
	n.connected.Store(nodeID, false)

	n.tracef(nodeID, "removed channel")
	notifierNodeUpdateChans.Dec()

	return true
}

// IsConnected reports if a node is connected to headscale and has a
// poll session open.
func (n *Notifier) IsConnected(nodeID types.NodeID) bool {
	notifierWaitersForLock.WithLabelValues("lock", "conncheck").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "conncheck").Dec()

	if val, ok := n.connected.Load(nodeID); ok {
		return val
	}

	return false
}

// IsLikelyConnected reports if a node is connected to headscale and has a
// poll session open, but it does not take the lock, so the result might
// be stale.
func (n *Notifier) IsLikelyConnected(nodeID types.NodeID) bool {
	if val, ok := n.connected.Load(nodeID); ok {
		return val
	}

	return false
}
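
// LikelyConnectedMap returns the lock-free connection map; as with
// IsLikelyConnected, reads from it may be stale.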
func (n *Notifier) LikelyConnectedMap() *xsync.MapOf[types.NodeID, bool] {
	return n.connected
}
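
// NotifyAll sends a state update to all connected nodes.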
func (n *Notifier) NotifyAll(ctx context.Context, update types.StateUpdate) {
	n.NotifyWithIgnore(ctx, update)
}
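
// NotifyWithIgnore hands the update to the batcher, which coalesces it or
// passes it straight through depending on the update type.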
func (n *Notifier) NotifyWithIgnore(
	ctx context.Context,
	update types.StateUpdate,
	ignoreNodeIDs ...types.NodeID,
) {
	if n.closed {
		return
	}

	notifierUpdateReceived.WithLabelValues(update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
	n.b.addOrPassthrough(update)
}
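
// NotifyByNodeID sends a state update directly to the given node's channel,
// if one is registered, giving up if ctx is cancelled first.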
func (n *Notifier) NotifyByNodeID(
	ctx context.Context,
	update types.StateUpdate,
	nodeID types.NodeID,
) {
	start := time.Now()
	notifierWaitersForLock.WithLabelValues("lock", "notify").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "notify").Dec()
	notifierWaitForLock.WithLabelValues("notify").Observe(time.Since(start).Seconds())

	if n.closed {
		return
	}

	if c, ok := n.nodes[nodeID]; ok {
		select {
		case <-ctx.Done():
			log.Error().
				Err(ctx.Err()).
				Uint64("node.id", nodeID.Uint64()).
				Any("origin", types.NotifyOriginKey.Value(ctx)).
				Any("origin-hostname", types.NotifyHostnameKey.Value(ctx)).
				Msgf("update not sent, context cancelled")
			if debugHighCardinalityMetrics {
				notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc()
			} else {
				notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
			}

			return
		case c <- update:
			n.tracef(nodeID, "update successfully sent on chan, origin: %s, origin-hostname: %s", ctx.Value("origin"), ctx.Value("hostname"))
			if debugHighCardinalityMetrics {
				notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc()
			} else {
				notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc()
			}
		}
	}
}
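
// sendAll delivers an update to every registered channel under the lock,
// aborting the remaining sends if any single send times out.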
func (n *Notifier) sendAll(update types.StateUpdate) {
	start := time.Now()
	notifierWaitersForLock.WithLabelValues("lock", "send-all").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "send-all").Dec()
	notifierWaitForLock.WithLabelValues("send-all").Observe(time.Since(start).Seconds())

	if n.closed {
		return
	}

	for id, c := range n.nodes {
		// Whenever an update is sent to all nodes, there is a chance that the node
		// has disconnected and the goroutine that was supposed to consume the update
		// has shut down the channel and is waiting for the lock held here in RemoveNode.
		// This means that there is potential for a deadlock which would stop all updates
		// going out to clients. This timeout prevents that from happening by giving up
		// on the send if the context is cancelled. After sendAll releases the lock, the
		// add/remove call will succeed and the update will go to the correct nodes on
		// the next call.
		ctx, cancel := context.WithTimeout(context.Background(), n.cfg.Tuning.NotifierSendTimeout)
		defer cancel()
		select {
		case <-ctx.Done():
			log.Error().
				Err(ctx.Err()).
				Uint64("node.id", id.Uint64()).
				Msgf("update not sent, context cancelled")
			if debugHighCardinalityMetrics {
				notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all", id.String()).Inc()
			} else {
				notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all").Inc()
			}

			return
		case c <- update:
			if debugHighCardinalityMetrics {
				notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all", id.String()).Inc()
			} else {
				notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all").Inc()
			}
		}
	}
}
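
// String renders the channels and connection state for debugging; it takes
// the lock, so avoid calling it on a hot path.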
func (n *Notifier) String() string {
	notifierWaitersForLock.WithLabelValues("lock", "string").Inc()
	n.l.Lock()
	defer n.l.Unlock()
	notifierWaitersForLock.WithLabelValues("lock", "string").Dec()

	var b strings.Builder
	fmt.Fprintf(&b, "chans (%d):\n", len(n.nodes))

	var keys []types.NodeID
	n.connected.Range(func(key types.NodeID, value bool) bool {
		keys = append(keys, key)
		return true
	})
	sort.Slice(keys, func(i, j int) bool {
		return keys[i] < keys[j]
	})

	for _, key := range keys {
		fmt.Fprintf(&b, "\t%d: %p\n", key, n.nodes[key])
	}

	b.WriteString("\n")
	fmt.Fprintf(&b, "connected (%d):\n", len(keys))

	for _, key := range keys {
		val, _ := n.connected.Load(key)
		fmt.Fprintf(&b, "\t%d: %t\n", key, val)
	}

	return b.String()
}
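
// batcher accumulates peer-change updates for one batch window and flushes
// them as a single update per tick, so bursts of changes do not fan out
// one by one.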
type batcher struct {
	tick *time.Ticker

	mu sync.Mutex

	cancelCh chan struct{}

	changedNodeIDs set.Slice[types.NodeID]
	nodesChanged   bool
	patches        map[types.NodeID]tailcfg.PeerChange
	patchesChanged bool

	n *Notifier
}
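
// newBatcher returns a batcher that flushes every batchTime once its
// doWork loop has been started by the caller.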
func newBatcher(batchTime time.Duration, n *Notifier) *batcher {
	return &batcher{
		tick:     time.NewTicker(batchTime),
		cancelCh: make(chan struct{}),
		patches:  make(map[types.NodeID]tailcfg.PeerChange),
		n:        n,
	}
}
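
// close stops the doWork loop; the send blocks until the worker receives it.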
func (b *batcher) close() {
	b.cancelCh <- struct{}{}
}

// addOrPassthrough adds the update to the batcher; if it is not a
// type that is currently batched, it is sent immediately.
func (b *batcher) addOrPassthrough(update types.StateUpdate) {
	notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Inc()
	b.mu.Lock()
	defer b.mu.Unlock()
	notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Dec()

	switch update.Type {
	case types.StatePeerChanged:
		b.changedNodeIDs.Add(update.ChangeNodes...)
		b.nodesChanged = true
		notifierBatcherChanges.WithLabelValues().Set(float64(b.changedNodeIDs.Len()))

	case types.StatePeerChangedPatch:
		for _, newPatch := range update.ChangePatches {
			if curr, ok := b.patches[types.NodeID(newPatch.NodeID)]; ok {
				overwritePatch(&curr, newPatch)
				b.patches[types.NodeID(newPatch.NodeID)] = curr
			} else {
				b.patches[types.NodeID(newPatch.NodeID)] = *newPatch
			}
		}
		b.patchesChanged = true
		notifierBatcherPatches.WithLabelValues().Set(float64(len(b.patches)))

	default:
		b.n.sendAll(update)
	}
}
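
// As a sketch of the merge semantics: two patches for the same node arriving
// within one batch window, e.g. {DERPRegion: 1} followed by {Online: &true},
// are combined by overwritePatch into a single patch carrying both fields
// before the next flush.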

// flush sends the accumulated changed nodes and patches to all
// nodes in the notifier.
func (b *batcher) flush() {
	notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Inc()
	b.mu.Lock()
	defer b.mu.Unlock()
	notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Dec()

	if b.nodesChanged || b.patchesChanged {
		var patches []*tailcfg.PeerChange
		// If a node is getting a full update from a changed-node
		// update, then its patch can be dropped.
		for nodeID, patch := range b.patches {
			if b.changedNodeIDs.Contains(nodeID) {
				delete(b.patches, nodeID)
			} else {
				patch := patch // copy to avoid aliasing the loop variable (pre-Go 1.22)
				patches = append(patches, &patch)
			}
		}

		changedNodes := b.changedNodeIDs.Slice().AsSlice()
		sort.Slice(changedNodes, func(i, j int) bool {
			return changedNodes[i] < changedNodes[j]
		})

		if b.changedNodeIDs.Slice().Len() > 0 {
			update := types.StateUpdate{
				Type:        types.StatePeerChanged,
				ChangeNodes: changedNodes,
			}

			b.n.sendAll(update)
		}

		if len(patches) > 0 {
			patchUpdate := types.StateUpdate{
				Type:          types.StatePeerChangedPatch,
				ChangePatches: patches,
			}

			b.n.sendAll(patchUpdate)
		}

		b.changedNodeIDs = set.Slice[types.NodeID]{}
		notifierBatcherChanges.WithLabelValues().Set(0)
		b.nodesChanged = false
		b.patches = make(map[types.NodeID]tailcfg.PeerChange, len(b.patches))
		notifierBatcherPatches.WithLabelValues().Set(0)
		b.patchesChanged = false
	}
}
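
// doWork runs until close is called, flushing the batch on every tick.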
func (b *batcher) doWork() {
	for {
		select {
		case <-b.cancelCh:
			return
		case <-b.tick.C:
			b.flush()
		}
	}
}

// overwritePatch takes the current patch and a newer patch
// and overwrites any field that has been changed in the newer one.
func overwritePatch(currPatch, newPatch *tailcfg.PeerChange) {
	if newPatch.DERPRegion != 0 {
		currPatch.DERPRegion = newPatch.DERPRegion
	}

	if newPatch.Cap != 0 {
		currPatch.Cap = newPatch.Cap
	}

	if newPatch.CapMap != nil {
		currPatch.CapMap = newPatch.CapMap
	}

	if newPatch.Endpoints != nil {
		currPatch.Endpoints = newPatch.Endpoints
	}

	if newPatch.Key != nil {
		currPatch.Key = newPatch.Key
	}

	if newPatch.KeySignature != nil {
		currPatch.KeySignature = newPatch.KeySignature
	}

	if newPatch.DiscoKey != nil {
		currPatch.DiscoKey = newPatch.DiscoKey
	}

	if newPatch.Online != nil {
		currPatch.Online = newPatch.Online
	}

	if newPatch.LastSeen != nil {
		currPatch.LastSeen = newPatch.LastSeen
	}

	if newPatch.KeyExpiry != nil {
		currPatch.KeyExpiry = newPatch.KeyExpiry
	}
}