ipn/ipnlocal: add primary and approved routes metrics

WIP

Updates tailscale/corp#22075

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
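At a high level, the diff below registers four per-path user-metric counters on magicsock's Conn (inbound/outbound bytes and packets, labeled as direct IPv4, direct IPv6, or DERP) and bumps them on every send and receive. Here is a minimal, self-contained sketch of that pattern, using only calls that appear in the diff (usermetric.NewMultiLabelMap, Add, Get); the package name, registry variable, and printed value are illustrative and not part of this commit:

    package main

    import (
        "expvar"
        "fmt"

        "tailscale.com/util/usermetric"
    )

    // pathLabel mirrors the label struct this commit adds; the commit's Path
    // values are "direct_ipv4", "direct_ipv6", and "derp".
    type pathLabel struct {
        Path string
    }

    func main() {
        // The daemon wires a real registry through magicsock.Options; a
        // zero-value Registry stands in here, as the tests in this diff do.
        var reg usermetric.Registry

        outboundBytes := usermetric.NewMultiLabelMap[pathLabel](
            &reg,
            "tailscaled_outbound_bytes_total",
            "counter",
            "Counts the number of bytes sent to other peers",
        )

        // The send paths add the byte count under whichever path carried the packet.
        outboundBytes.Add(pathLabel{Path: "direct_ipv4"}, 1500)

        // The tests read a counter back by label and type-assert to *expvar.Int.
        if v, ok := outboundBytes.Get(pathLabel{Path: "direct_ipv4"}).(*expvar.Int); ok {
            fmt.Println(v.Value()) // 1500
        }
    }

The full diff follows.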
@@ -690,7 +690,9 @@ func (c *connBind) receiveDERP(buffs [][]byte, sizes []int, eps []conn.Endpoint)
             // No data read occurred. Wait for another packet.
             continue
         }
-        metricRecvDataDERP.Add(1)
+        metricRecvDataPacketsDERP.Add(1)
+        c.metricInboundPacketsTotal.Add(pathLabel{Path: PathDERP}, 1)
+        c.metricInboundBytesTotal.Add(pathLabel{Path: PathDERP}, int64(n))
         sizes[0] = n
         eps[0] = ep
         return 1, nil
@@ -728,7 +730,7 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en

     ep.noteRecvActivity(ipp, mono.Now())
     if stats := c.stats.Load(); stats != nil {
-        stats.UpdateRxPhysical(ep.nodeAddr, ipp, dm.n)
+        stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n)
     }
     return n, ep
 }
@@ -950,6 +950,8 @@ func (de *endpoint) send(buffs [][]byte) error {
         return errNoUDPOrDERP
     }
     var err error
+    // TODO(kradalby): for paring, why is this not an if-else? Do we send to
+    // both DERP and UDP at the same time if we have both?
     if udpAddr.IsValid() {
         _, err = de.c.sendUDPBatch(udpAddr, buffs)
@@ -960,13 +962,23 @@ func (de *endpoint) send(buffs [][]byte) error {
             de.noteBadEndpoint(udpAddr)
         }

+        var txBytes int
+        for _, b := range buffs {
+            txBytes += len(b)
+        }
+
+        switch {
+        case udpAddr.Addr().Is4():
+            de.c.metricOutboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv4}, int64(len(buffs)))
+            de.c.metricOutboundBytesTotal.Add(pathLabel{Path: PathDirectIPv4}, int64(txBytes))
+        case udpAddr.Addr().Is6():
+            de.c.metricOutboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv6}, int64(len(buffs)))
+            de.c.metricOutboundBytesTotal.Add(pathLabel{Path: PathDirectIPv6}, int64(txBytes))
+        }
+
         // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends.
         if stats := de.c.stats.Load(); err == nil && stats != nil {
-            var txBytes int
-            for _, b := range buffs {
-                txBytes += len(b)
-            }
-            stats.UpdateTxPhysical(de.nodeAddr, udpAddr, txBytes)
+            stats.UpdateTxPhysical(de.nodeAddr, udpAddr, len(buffs), txBytes)
         }
     }
     if derpAddr.IsValid() {
@@ -974,8 +986,11 @@ func (de *endpoint) send(buffs [][]byte) error {
         for _, buff := range buffs {
             ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff)
             if stats := de.c.stats.Load(); stats != nil {
-                stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buff))
+                stats.UpdateTxPhysical(de.nodeAddr, derpAddr, 1, len(buff))
             }
+            // TODO(kradalby): Is this the correct place for this? Do we need an Error version?
+            de.c.metricOutboundPacketsTotal.Add(pathLabel{Path: PathDERP}, 1)
+            de.c.metricOutboundBytesTotal.Add(pathLabel{Path: PathDERP}, int64(len(buff)))
             if !ok {
                 allOk = false
             }
@@ -33,6 +33,7 @@ import (
     "tailscale.com/health"
     "tailscale.com/hostinfo"
     "tailscale.com/ipn/ipnstate"
+    "tailscale.com/metrics"
     "tailscale.com/net/connstats"
     "tailscale.com/net/netcheck"
     "tailscale.com/net/neterror"
@@ -60,6 +61,7 @@ import (
     "tailscale.com/util/set"
     "tailscale.com/util/testenv"
     "tailscale.com/util/uniq"
+    "tailscale.com/util/usermetric"
     "tailscale.com/wgengine/capture"
     "tailscale.com/wgengine/wgint"
 )
@@ -320,6 +322,11 @@ type Conn struct {
     // responsibility to ensure that traffic from these endpoints is routed
     // to the node.
     staticEndpoints views.Slice[netip.AddrPort]
+
+    metricInboundPacketsTotal *metrics.MultiLabelMap[pathLabel]
+    metricOutboundPacketsTotal *metrics.MultiLabelMap[pathLabel]
+    metricInboundBytesTotal *metrics.MultiLabelMap[pathLabel]
+    metricOutboundBytesTotal *metrics.MultiLabelMap[pathLabel]
 }

 // SetDebugLoggingEnabled controls whether spammy debug logging is enabled.
@@ -386,6 +393,9 @@ type Options struct {
     // report errors and warnings to.
     HealthTracker *health.Tracker

+    // UserMetricsRegistry specifies the metrics registry to record metrics to.
+    UserMetricsRegistry *usermetric.Registry
+
     // ControlKnobs are the set of control knobs to use.
     // If nil, they're ignored and not updated.
     ControlKnobs *controlknobs.Knobs
@@ -466,6 +476,10 @@ func NewConn(opts Options) (*Conn, error) {
         return nil, errors.New("magicsock.Options.NetMon must be non-nil")
     }

+    if opts.UserMetricsRegistry == nil {
+        return nil, errors.New("magicsock.Options.UserMetrics must be non-nil")
+    }
+
     c := newConn(opts.logf())
     c.port.Store(uint32(opts.Port))
     c.controlKnobs = opts.ControlKnobs
@@ -505,6 +519,32 @@ func NewConn(opts Options) (*Conn, error) {
         UseDNSCache: true,
     }

+    // TODO(kradalby): factor out to a func
+    c.metricInboundBytesTotal = usermetric.NewMultiLabelMap[pathLabel](
+        opts.UserMetricsRegistry,
+        "tailscaled_inbound_bytes_total",
+        "counter",
+        "Counts the number of bytes received from other peers",
+    )
+    c.metricInboundPacketsTotal = usermetric.NewMultiLabelMap[pathLabel](
+        opts.UserMetricsRegistry,
+        "tailscaled_inbound_packets_total",
+        "counter",
+        "Counts the number of packets received from other peers",
+    )
+    c.metricOutboundBytesTotal = usermetric.NewMultiLabelMap[pathLabel](
+        opts.UserMetricsRegistry,
+        "tailscaled_outbound_bytes_total",
+        "counter",
+        "Counts the number of bytes sent to other peers",
+    )
+    c.metricOutboundPacketsTotal = usermetric.NewMultiLabelMap[pathLabel](
+        opts.UserMetricsRegistry,
+        "tailscaled_outbound_packets_total",
+        "counter",
+        "Counts the number of packets sent to other peers",
+    )
+
     if d4, err := c.listenRawDisco("ip4"); err == nil {
         c.logf("[v1] using BPF disco receiver for IPv4")
         c.closeDisco4 = d4
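The TODO in the hunk above suggests factoring these four registrations into a helper. A hypothetical sketch of what that could look like, assuming the imports and types already present in this file (the helper name is illustrative and not part of this commit):

    // registerPathCounter is a hypothetical helper, not part of this commit:
    // it wraps the repeated usermetric.NewMultiLabelMap call so NewConn could
    // register each per-path counter in one line.
    func registerPathCounter(reg *usermetric.Registry, name, help string) *metrics.MultiLabelMap[pathLabel] {
        return usermetric.NewMultiLabelMap[pathLabel](reg, name, "counter", help)
    }

NewConn would then read, for example, c.metricInboundBytesTotal = registerPathCounter(opts.UserMetricsRegistry, "tailscaled_inbound_bytes_total", "Counts the number of bytes received from other peers").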
@@ -1145,6 +1185,25 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) {
     } else {
         if sent {
             metricSendUDP.Add(1)
+
+            // TODO(kradalby): Do we need error variants of these?
+            switch {
+            case ipp.Addr().Is4():
+                c.metricOutboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv4}, 1)
+                c.metricOutboundBytesTotal.Add(pathLabel{Path: PathDirectIPv4}, int64(len(b)))
+            case ipp.Addr().Is6():
+                c.metricOutboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv6}, 1)
+                c.metricOutboundBytesTotal.Add(pathLabel{Path: PathDirectIPv6}, int64(len(b)))
+            }
+
+            if stats := c.stats.Load(); stats != nil {
+                c.mu.Lock()
+                ep, ok := c.peerMap.endpointForIPPort(ipp)
+                c.mu.Unlock()
+                if ok {
+                    stats.UpdateTxPhysical(ep.nodeAddr, ipp, 1, len(b))
+                }
+            }
         }
     }
     return
@@ -1266,17 +1325,29 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) {

 // receiveIPv4 creates an IPv4 ReceiveFunc reading from c.pconn4.
 func (c *Conn) receiveIPv4() conn.ReceiveFunc {
-    return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), metricRecvDataIPv4)
+    return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4),
+        func(i int64) {
+            metricRecvDataPacketsIPv4.Add(i)
+            c.metricInboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv4}, i)
+        }, func(i int64) {
+            c.metricInboundBytesTotal.Add(pathLabel{Path: PathDirectIPv4}, i)
+        })
 }

 // receiveIPv6 creates an IPv6 ReceiveFunc reading from c.pconn6.
 func (c *Conn) receiveIPv6() conn.ReceiveFunc {
-    return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), metricRecvDataIPv6)
+    return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6),
+        func(i int64) {
+            metricRecvDataPacketsIPv6.Add(i)
+            c.metricInboundPacketsTotal.Add(pathLabel{Path: PathDirectIPv6}, i)
+        }, func(i int64) {
+            c.metricInboundBytesTotal.Add(pathLabel{Path: PathDirectIPv6}, i)
+        })
 }

 // mkReceiveFunc creates a ReceiveFunc reading from ruc.
-// The provided healthItem and metric are updated if non-nil.
-func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, metric *clientmetric.Metric) conn.ReceiveFunc {
+// The provided healthItem and metrics are updated if non-nil.
+func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetricFunc, bytesMetricFunc func(int64)) conn.ReceiveFunc {
     // epCache caches an IPPort->endpoint for hot flows.
     var epCache ippEndpointCache
@@ -1313,8 +1384,11 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu
             }
             ipp := msg.Addr.(*net.UDPAddr).AddrPort()
             if ep, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok {
-                if metric != nil {
-                    metric.Add(1)
+                if packetMetricFunc != nil {
+                    packetMetricFunc(1)
                 }
+                if bytesMetricFunc != nil {
+                    bytesMetricFunc(int64(msg.N))
+                }
                 eps[i] = ep
                 sizes[i] = msg.N
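The hunk above replaces mkReceiveFunc's single *clientmetric.Metric argument with two optional func(int64) callbacks, so each receive path can bump a packet counter and a byte counter together. A minimal illustration of that nil-checked callback shape; the function and parameter names here are hypothetical, not part of this commit:

    // countReceived shows the callback pattern the new mkReceiveFunc signature
    // uses: either callback may be nil, the packet callback is bumped by one
    // per packet, and the byte callback gets the payload size.
    func countReceived(n int, packetMetricFunc, bytesMetricFunc func(int64)) {
        if packetMetricFunc != nil {
            packetMetricFunc(1)
        }
        if bytesMetricFunc != nil {
            bytesMetricFunc(int64(n))
        }
    }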
@@ -1370,7 +1444,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache)
     ep.lastRecvUDPAny.StoreAtomic(now)
     ep.noteRecvActivity(ipp, now)
     if stats := c.stats.Load(); stats != nil {
-        stats.UpdateRxPhysical(ep.nodeAddr, ipp, len(b))
+        stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b))
     }
     return ep, true
 }
@@ -2924,9 +2998,9 @@ var (
     // Data packets (non-disco)
     metricSendData = clientmetric.NewCounter("magicsock_send_data")
     metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down")
-    metricRecvDataDERP = clientmetric.NewCounter("magicsock_recv_data_derp")
-    metricRecvDataIPv4 = clientmetric.NewCounter("magicsock_recv_data_ipv4")
-    metricRecvDataIPv6 = clientmetric.NewCounter("magicsock_recv_data_ipv6")
+    metricRecvDataPacketsDERP = clientmetric.NewCounter("magicsock_recv_data_derp")
+    metricRecvDataPacketsIPv4 = clientmetric.NewCounter("magicsock_recv_data_ipv4")
+    metricRecvDataPacketsIPv6 = clientmetric.NewCounter("magicsock_recv_data_ipv6")

     // Disco packets
     metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp")
@@ -3064,3 +3138,19 @@ func (le *lazyEndpoint) GetPeerEndpoint(peerPublicKey [32]byte) conn.Endpoint {
     le.c.logf("magicsock: lazyEndpoint.GetPeerEndpoint(%v) found: %v", pubKey.ShortString(), ep.nodeAddr)
     return ep
 }
+
+type Path string
+
+const (
+    PathDirectIPv4 Path = "direct_ipv4"
+    PathDirectIPv6 Path = "direct_ipv6"
+    PathDERP Path = "derp"
+)
+
+type pathLabel struct {
+    // Path indicates the path that the packet took:
+    // - direct_ipv4
+    // - direct_ipv6
+    // - derp
+    Path Path
+}
@@ -10,6 +10,7 @@ import (
     "crypto/tls"
     "encoding/binary"
     "errors"
+    "expvar"
     "fmt"
     "io"
     "math/rand"
@@ -28,6 +29,7 @@ import (
     "time"
     "unsafe"

+    "github.com/google/go-cmp/cmp"
     wgconn "github.com/tailscale/wireguard-go/conn"
     "github.com/tailscale/wireguard-go/device"
     "github.com/tailscale/wireguard-go/tun/tuntest"
@@ -64,6 +66,7 @@ import (
     "tailscale.com/util/cibuild"
     "tailscale.com/util/racebuild"
     "tailscale.com/util/set"
+    "tailscale.com/util/usermetric"
     "tailscale.com/wgengine/filter"
     "tailscale.com/wgengine/wgcfg"
     "tailscale.com/wgengine/wgcfg/nmcfg"
@@ -156,6 +159,7 @@ type magicStack struct {
     dev *device.Device // the wireguard-go Device that connects the previous things
     wgLogger *wglog.Logger // wireguard-go log wrapper
     netMon *netmon.Monitor // always non-nil
+    metrics *usermetric.Registry
 }

 // newMagicStack builds and initializes an idle magicsock and
@@ -174,6 +178,8 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
         t.Fatalf("netmon.New: %v", err)
     }

+    var reg usermetric.Registry
+
     epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary
     conn, err := NewConn(Options{
         NetMon: netMon,
@@ -183,6 +189,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
         EndpointsFunc: func(eps []tailcfg.Endpoint) {
             epCh <- eps
         },
+        UserMetricsRegistry: &reg,
     })
     if err != nil {
         t.Fatalf("constructing magicsock: %v", err)
@@ -193,7 +200,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
     }

     tun := tuntest.NewChannelTUN()
-    tsTun := tstun.Wrap(logf, tun.TUN())
+    tsTun := tstun.Wrap(logf, tun.TUN(), nil)
     tsTun.SetFilter(filter.NewAllowAllForTest(logf))
     tsTun.Start()
@@ -219,6 +226,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
         dev: dev,
         wgLogger: wgLogger,
         netMon: netMon,
+        metrics: &reg,
     }
 }
@@ -392,11 +400,12 @@ func TestNewConn(t *testing.T) {

     port := pickPort(t)
     conn, err := NewConn(Options{
-        Port:              port,
-        DisablePortMapper: true,
-        EndpointsFunc:     epFunc,
-        Logf:              t.Logf,
-        NetMon:            netMon,
+        Port:                port,
+        DisablePortMapper:   true,
+        EndpointsFunc:       epFunc,
+        Logf:                t.Logf,
+        NetMon:              netMon,
+        UserMetricsRegistry: new(usermetric.Registry),
     })
     if err != nil {
         t.Fatal(err)
@@ -519,10 +528,12 @@ func TestDeviceStartStop(t *testing.T) {
     }
     defer netMon.Close()

+    reg := new(usermetric.Registry)
     conn, err := NewConn(Options{
-        EndpointsFunc: func(eps []tailcfg.Endpoint) {},
-        Logf:          t.Logf,
-        NetMon:        netMon,
+        EndpointsFunc:       func(eps []tailcfg.Endpoint) {},
+        Logf:                t.Logf,
+        NetMon:              netMon,
+        UserMetricsRegistry: reg,
     })
     if err != nil {
         t.Fatal(err)
@@ -1181,6 +1192,100 @@ func testTwoDevicePing(t *testing.T, d *devices) {
         checkStats(t, m1, m1Conns)
         checkStats(t, m2, m2Conns)
     })
+
+    t.Run("compare-metrics-stats", func(t *testing.T) {
+        setT(t)
+        defer setT(outerT)
+        m1.conn.resetMetricsForTest()
+        m1.stats.TestExtract()
+        m2.conn.resetMetricsForTest()
+        m2.stats.TestExtract()
+        t.Logf("Metrics before: %s\n", m1.metrics.String())
+        ping1(t)
+        ping2(t)
+        assertConnStatsAndUserMetricsEqual(t, m1)
+        assertConnStatsAndUserMetricsEqual(t, m2)
+        t.Logf("Metrics after: %s\n", m1.metrics.String())
+    })
 }
+
+func (c *Conn) resetMetricsForTest() {
+    c.metricInboundBytesTotal.ResetAllForTest()
+    c.metricInboundPacketsTotal.ResetAllForTest()
+    c.metricOutboundBytesTotal.ResetAllForTest()
+    c.metricOutboundPacketsTotal.ResetAllForTest()
+}
+
+func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) {
+    _, phys := ms.stats.TestExtract()
+
+    physIPv4RxBytes := int64(0)
+    physIPv4TxBytes := int64(0)
+    physDERPRxBytes := int64(0)
+    physDERPTxBytes := int64(0)
+    physIPv4RxPackets := int64(0)
+    physIPv4TxPackets := int64(0)
+    physDERPRxPackets := int64(0)
+    physDERPTxPackets := int64(0)
+    for conn, count := range phys {
+        t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String())
+        if conn.Dst.String() == "127.3.3.40:1" {
+            physDERPRxBytes += int64(count.RxBytes)
+            physDERPTxBytes += int64(count.TxBytes)
+            physDERPRxPackets += int64(count.RxPackets)
+            physDERPTxPackets += int64(count.TxPackets)
+        } else {
+            physIPv4RxBytes += int64(count.RxBytes)
+            physIPv4TxBytes += int64(count.TxBytes)
+            physIPv4RxPackets += int64(count.RxPackets)
+            physIPv4TxPackets += int64(count.TxPackets)
+        }
+    }
+
+    var metricIPv4RxBytes, metricIPv4TxBytes, metricDERPRxBytes, metricDERPTxBytes int64
+    var metricIPv4RxPackets, metricIPv4TxPackets, metricDERPRxPackets, metricDERPTxPackets int64
+
+    if m, ok := ms.conn.metricInboundBytesTotal.Get(pathLabel{Path: PathDirectIPv4}).(*expvar.Int); ok {
+        metricIPv4RxBytes = m.Value()
+    }
+    if m, ok := ms.conn.metricOutboundBytesTotal.Get(pathLabel{Path: PathDirectIPv4}).(*expvar.Int); ok {
+        metricIPv4TxBytes = m.Value()
+    }
+    if m, ok := ms.conn.metricInboundBytesTotal.Get(pathLabel{Path: PathDERP}).(*expvar.Int); ok {
+        metricDERPRxBytes = m.Value()
+    }
+    if m, ok := ms.conn.metricOutboundBytesTotal.Get(pathLabel{Path: PathDERP}).(*expvar.Int); ok {
+        metricDERPTxBytes = m.Value()
+    }
+    if m, ok := ms.conn.metricInboundPacketsTotal.Get(pathLabel{Path: PathDirectIPv4}).(*expvar.Int); ok {
+        metricIPv4RxPackets = m.Value()
+    }
+    if m, ok := ms.conn.metricOutboundPacketsTotal.Get(pathLabel{Path: PathDirectIPv4}).(*expvar.Int); ok {
+        metricIPv4TxPackets = m.Value()
+    }
+    if m, ok := ms.conn.metricInboundPacketsTotal.Get(pathLabel{Path: PathDERP}).(*expvar.Int); ok {
+        metricDERPRxPackets = m.Value()
+    }
+    if m, ok := ms.conn.metricOutboundPacketsTotal.Get(pathLabel{Path: PathDERP}).(*expvar.Int); ok {
+        metricDERPTxPackets = m.Value()
+    }
+
+    assertEqual(t, "derp bytes inbound", physDERPRxBytes, metricDERPRxBytes)
+    assertEqual(t, "derp bytes outbound", physDERPTxBytes, metricDERPTxBytes)
+    assertEqual(t, "ipv4 bytes inbound", physIPv4RxBytes, metricIPv4RxBytes)
+    assertEqual(t, "ipv4 bytes outbound", physIPv4TxBytes, metricIPv4TxBytes)
+    assertEqual(t, "derp packets inbound", physDERPRxPackets, metricDERPRxPackets)
+    assertEqual(t, "derp packets outbound", physDERPTxPackets, metricDERPTxPackets)
+    assertEqual(t, "ipv4 packets inbound", physIPv4RxPackets, metricIPv4RxPackets)
+    assertEqual(t, "ipv4 packets outbound", physIPv4TxPackets, metricIPv4TxPackets)
+}
+
+func assertEqual(t *testing.T, name string, a, b any) {
+    t.Helper()
+    t.Logf("assertEqual %s: %v == %v", name, a, b)
+    if diff := cmp.Diff(a, b); diff != "" {
+        t.Errorf("%s mismatch (-want +got):\n%s", name, diff)
+    }
+}

 func TestDiscoMessage(t *testing.T) {
@@ -1275,6 +1380,7 @@ func newTestConn(t testing.TB) *Conn {
     conn, err := NewConn(Options{
         NetMon: netMon,
         HealthTracker: new(health.Tracker),
+        UserMetricsRegistry: new(usermetric.Registry),
         DisablePortMapper: true,
         Logf: t.Logf,
         Port: port,
@@ -46,10 +46,11 @@ func TestInjectInboundLeak(t *testing.T) {
     }
     sys := new(tsd.System)
     eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
-        Tun:           tunDev,
-        Dialer:        dialer,
-        SetSubsystem:  sys.Set,
-        HealthTracker: sys.HealthTracker(),
+        Tun:                 tunDev,
+        Dialer:              dialer,
+        SetSubsystem:        sys.Set,
+        HealthTracker:       sys.HealthTracker(),
+        UserMetricsRegistry: sys.UserMetricsRegistry(),
     })
     if err != nil {
         t.Fatal(err)
@@ -103,10 +104,11 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl {
     dialer := new(tsdial.Dialer)
     logf := tstest.WhileTestRunningLogger(tb)
     eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
-        Tun:           tunDev,
-        Dialer:        dialer,
-        SetSubsystem:  sys.Set,
-        HealthTracker: sys.HealthTracker(),
+        Tun:                 tunDev,
+        Dialer:              dialer,
+        SetSubsystem:        sys.Set,
+        HealthTracker:       sys.HealthTracker(),
+        UserMetricsRegistry: sys.UserMetricsRegistry(),
     })
     if err != nil {
         tb.Fatal(err)
@@ -49,6 +49,7 @@ import (
     "tailscale.com/util/mak"
     "tailscale.com/util/set"
     "tailscale.com/util/testenv"
+    "tailscale.com/util/usermetric"
    "tailscale.com/version"
     "tailscale.com/wgengine/capture"
     "tailscale.com/wgengine/filter"
@@ -195,6 +196,9 @@ type Config struct {
     // HealthTracker, if non-nil, is the health tracker to use.
     HealthTracker *health.Tracker

+    // UserMetricsRegistry, if non-nil, is the usermetrics registry to use.
+    UserMetricsRegistry *usermetric.Registry
+
     // Dialer is the dialer to use for outbound connections.
     // If nil, a new Dialer is created.
     Dialer *tsdial.Dialer
@@ -249,6 +253,8 @@ func NewFakeUserspaceEngine(logf logger.Logf, opts ...any) (Engine, error) {
             conf.ControlKnobs = v
         case *health.Tracker:
             conf.HealthTracker = v
+        case *usermetric.Registry:
+            conf.UserMetricsRegistry = v
         default:
             return nil, fmt.Errorf("unknown option type %T", v)
         }
@@ -289,9 +295,9 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error)

     var tsTUNDev *tstun.Wrapper
     if conf.IsTAP {
-        tsTUNDev = tstun.WrapTAP(logf, conf.Tun)
+        tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.UserMetricsRegistry)
     } else {
-        tsTUNDev = tstun.Wrap(logf, conf.Tun)
+        tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.UserMetricsRegistry)
     }
     closePool.add(tsTUNDev)
@@ -379,17 +385,18 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error)
         }
     }
     magicsockOpts := magicsock.Options{
-        Logf:             logf,
-        Port:             conf.ListenPort,
-        EndpointsFunc:    endpointsFn,
-        DERPActiveFunc:   e.RequestStatus,
-        IdleFunc:         e.tundev.IdleDuration,
-        NoteRecvActivity: e.noteRecvActivity,
-        NetMon:           e.netMon,
-        HealthTracker:    e.health,
-        ControlKnobs:     conf.ControlKnobs,
-        OnPortUpdate:     onPortUpdate,
-        PeerByKeyFunc:    e.PeerByKey,
+        Logf:                logf,
+        Port:                conf.ListenPort,
+        EndpointsFunc:       endpointsFn,
+        DERPActiveFunc:      e.RequestStatus,
+        IdleFunc:            e.tundev.IdleDuration,
+        NoteRecvActivity:    e.noteRecvActivity,
+        NetMon:              e.netMon,
+        HealthTracker:       e.health,
+        UserMetricsRegistry: conf.UserMetricsRegistry,
+        ControlKnobs:        conf.ControlKnobs,
+        OnPortUpdate:        onPortUpdate,
+        PeerByKeyFunc:       e.PeerByKey,
     }

     var err error
@@ -20,8 +20,9 @@ func TestIsNetstack(t *testing.T) {
     e, err := wgengine.NewUserspaceEngine(
         tstest.WhileTestRunningLogger(t),
         wgengine.Config{
-            SetSubsystem:  sys.Set,
-            HealthTracker: sys.HealthTracker(),
+            SetSubsystem:        sys.Set,
+            HealthTracker:       sys.HealthTracker(),
+            UserMetricsRegistry: sys.UserMetricsRegistry(),
         },
     )
     if err != nil {
@@ -72,6 +73,7 @@ func TestIsNetstackRouter(t *testing.T) {
             conf := tt.conf
             conf.SetSubsystem = sys.Set
             conf.HealthTracker = sys.HealthTracker()
+            conf.UserMetricsRegistry = sys.UserMetricsRegistry()
             e, err := wgengine.NewUserspaceEngine(logger.Discard, conf)
             if err != nil {
                 t.Fatal(err)
@@ -25,6 +25,7 @@ import (
     "tailscale.com/types/key"
     "tailscale.com/types/netmap"
     "tailscale.com/types/opt"
+    "tailscale.com/util/usermetric"
     "tailscale.com/wgengine/router"
     "tailscale.com/wgengine/wgcfg"
 )
@@ -100,7 +101,8 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView {

 func TestUserspaceEngineReconfig(t *testing.T) {
     ht := new(health.Tracker)
-    e, err := NewFakeUserspaceEngine(t.Logf, 0, ht)
+    reg := new(usermetric.Registry)
+    e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg)
     if err != nil {
         t.Fatal(err)
     }
@@ -167,9 +169,10 @@ func TestUserspaceEnginePortReconfig(t *testing.T) {
     // Keep making a wgengine until we find an unused port
     var ue *userspaceEngine
     ht := new(health.Tracker)
+    reg := new(usermetric.Registry)
     for i := range 100 {
         attempt := uint16(defaultPort + i)
-        e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht)
+        e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg)
         if err != nil {
             t.Fatal(err)
         }
@@ -249,7 +252,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) {
     var knobs controlknobs.Knobs

     ht := new(health.Tracker)
-    e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht)
+    reg := new(usermetric.Registry)
+    e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg)
     if err != nil {
         t.Fatal(err)
     }
@@ -9,6 +9,7 @@ import (
     "time"

     "tailscale.com/health"
+    "tailscale.com/util/usermetric"
 )

 func TestWatchdog(t *testing.T) {
@@ -24,7 +25,8 @@ func TestWatchdog(t *testing.T) {
     t.Run("default watchdog does not fire", func(t *testing.T) {
         t.Parallel()
         ht := new(health.Tracker)
-        e, err := NewFakeUserspaceEngine(t.Logf, 0, ht)
+        reg := new(usermetric.Registry)
+        e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg)
         if err != nil {
             t.Fatal(err)
         }