types/netlogtype: new package for network logging types (#6092)

The netlog.Message type is useful to depend on from other packages,
but doing so would transitively cause gvisor and other large packages
to be linked in.

Avoid this problem by moving all network logging types to a single package.
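
For orientation, a rough sketch of the moved types, reconstructed from the
code deleted from net/tunstats in the diff below; the Connection field set is
inferred from call sites, and the real package also carries the Message type
mentioned above (and possibly more) that is not shown here:

	package netlogtype

	import (
		"net/netip"

		"tailscale.com/types/ipproto"
	)

	// Connection identifies a flow by protocol, source, and destination.
	// (Field set inferred from usage in this diff.)
	type Connection struct {
		Proto ipproto.Proto
		Src   netip.AddrPort
		Dst   netip.AddrPort
	}

	// Counts are statistics about a particular connection.
	// (Copied from the definition removed from net/tunstats; tags may differ
	// in the real package.)
	type Counts struct {
		TxPackets uint64 `json:"txPkts,omitempty"`
		TxBytes   uint64 `json:"txBytes,omitempty"`
		RxPackets uint64 `json:"rxPkts,omitempty"`
		RxBytes   uint64 `json:"rxBytes,omitempty"`
	}

	// Add adds the counts from both c1 and c2.
	func (c1 Counts) Add(c2 Counts) Counts {
		c1.TxPackets += c2.TxPackets
		c1.TxBytes += c2.TxBytes
		c1.RxPackets += c2.RxPackets
		c1.RxBytes += c2.RxBytes
		return c1
	}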

We also update staticcheck to take in:

	003d277bcf

Signed-off-by: Joe Tsai <joetsai@digital-static.net>

Author: Joe Tsai
Date:   2022-10-27 14:14:18 -07:00 (committed by GitHub)
Commit: c21a3c4733 (parent a44687e71f)

11 changed files with 112 additions and 91 deletions

View File

@@ -22,7 +22,6 @@ import (
 	"golang.zx2c4.com/wireguard/tun"
 	"gvisor.dev/gvisor/pkg/tcpip/stack"
 	"tailscale.com/disco"
-	"tailscale.com/net/flowtrack"
 	"tailscale.com/net/packet"
 	"tailscale.com/net/tsaddr"
 	"tailscale.com/net/tunstats"
@@ -31,6 +30,7 @@ import (
 	"tailscale.com/types/ipproto"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
+	"tailscale.com/types/netlogtype"
 	"tailscale.com/util/clientmetric"
 	"tailscale.com/wgengine/filter"
 )
@@ -853,7 +853,7 @@ func (t *Wrapper) SetStatisticsEnabled(enable bool) {
 // ExtractStatistics extracts and resets the counters for all active connections.
 // It must be called periodically otherwise the memory used is unbounded.
-func (t *Wrapper) ExtractStatistics() map[flowtrack.Tuple]tunstats.Counts {
+func (t *Wrapper) ExtractStatistics() map[netlogtype.Connection]netlogtype.Counts {
 	return t.stats.Extract()
 }

View File

@@ -19,15 +19,14 @@ import (
 	"go4.org/netipx"
 	"golang.zx2c4.com/wireguard/tun/tuntest"
 	"tailscale.com/disco"
-	"tailscale.com/net/flowtrack"
 	"tailscale.com/net/netaddr"
 	"tailscale.com/net/packet"
-	"tailscale.com/net/tunstats"
 	"tailscale.com/tstest"
 	"tailscale.com/tstime/mono"
 	"tailscale.com/types/ipproto"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
+	"tailscale.com/types/netlogtype"
 	"tailscale.com/wgengine/filter"
 )
@@ -379,17 +378,17 @@ func TestFilter(t *testing.T) {
 		}
 		got := tun.ExtractStatistics()
-		want := map[flowtrack.Tuple]tunstats.Counts{}
+		want := map[netlogtype.Connection]netlogtype.Counts{}
 		if !tt.drop {
 			var p packet.Parsed
 			p.Decode(tt.data)
 			switch tt.dir {
 			case in:
-				tuple := flowtrack.Tuple{Proto: ipproto.UDP, Src: p.Dst, Dst: p.Src}
-				want[tuple] = tunstats.Counts{RxPackets: 1, RxBytes: uint64(len(tt.data))}
+				conn := netlogtype.Connection{Proto: ipproto.UDP, Src: p.Dst, Dst: p.Src}
+				want[conn] = netlogtype.Counts{RxPackets: 1, RxBytes: uint64(len(tt.data))}
 			case out:
-				tuple := flowtrack.Tuple{Proto: ipproto.UDP, Src: p.Src, Dst: p.Dst}
-				want[tuple] = tunstats.Counts{TxPackets: 1, TxBytes: uint64(len(tt.data))}
+				conn := netlogtype.Connection{Proto: ipproto.UDP, Src: p.Src, Dst: p.Dst}
+				want[conn] = netlogtype.Counts{TxPackets: 1, TxBytes: uint64(len(tt.data))}
 			}
 		}
 		if !reflect.DeepEqual(got, want) {

View File

@@ -9,8 +9,8 @@ package tunstats
 import (
 	"sync"
-	"tailscale.com/net/flowtrack"
 	"tailscale.com/net/packet"
+	"tailscale.com/types/netlogtype"
 )
 // Statistics maintains counters for every connection.
@@ -18,36 +18,19 @@ import (
 // The zero value is ready for use.
 type Statistics struct {
 	mu sync.Mutex
-	m  map[flowtrack.Tuple]Counts
-}
-// Counts are statistics about a particular connection.
-type Counts struct {
-	TxPackets uint64 `json:"txPkts,omitempty"`
-	TxBytes   uint64 `json:"txBytes,omitempty"`
-	RxPackets uint64 `json:"rxPkts,omitempty"`
-	RxBytes   uint64 `json:"rxBytes,omitempty"`
-}
-// Add adds the counts from both c1 and c2.
-func (c1 Counts) Add(c2 Counts) Counts {
-	c1.TxPackets += c2.TxPackets
-	c1.TxBytes += c2.TxBytes
-	c1.RxPackets += c2.RxPackets
-	c1.RxBytes += c2.RxBytes
-	return c1
+	m  map[netlogtype.Connection]netlogtype.Counts
 }
 // UpdateTx updates the counters for a transmitted IP packet
 // The source and destination of the packet directly correspond with
-// the source and destination in flowtrack.Tuple.
+// the source and destination in netlogtype.Connection.
 func (s *Statistics) UpdateTx(b []byte) {
 	s.update(b, false)
 }
 // UpdateRx updates the counters for a received IP packet.
 // The source and destination of the packet are inverted with respect to
-// the source and destination in flowtrack.Tuple.
+// the source and destination in netlogtype.Connection.
 func (s *Statistics) UpdateRx(b []byte) {
 	s.update(b, true)
 }
@@ -55,17 +38,17 @@ func (s *Statistics) UpdateRx(b []byte) {
 func (s *Statistics) update(b []byte, receive bool) {
 	var p packet.Parsed
 	p.Decode(b)
-	tuple := flowtrack.Tuple{Proto: p.IPProto, Src: p.Src, Dst: p.Dst}
+	conn := netlogtype.Connection{Proto: p.IPProto, Src: p.Src, Dst: p.Dst}
 	if receive {
-		tuple.Src, tuple.Dst = tuple.Dst, tuple.Src
+		conn.Src, conn.Dst = conn.Dst, conn.Src
 	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.m == nil {
-		s.m = make(map[flowtrack.Tuple]Counts)
+		s.m = make(map[netlogtype.Connection]netlogtype.Counts)
 	}
-	cnts := s.m[tuple]
+	cnts := s.m[conn]
 	if receive {
 		cnts.RxPackets++
 		cnts.RxBytes += uint64(len(b))
@@ -73,15 +56,15 @@ func (s *Statistics) update(b []byte, receive bool) {
 		cnts.TxPackets++
 		cnts.TxBytes += uint64(len(b))
 	}
-	s.m[tuple] = cnts
+	s.m[conn] = cnts
 }
 // Extract extracts and resets the counters for all active connections.
 // It must be called periodically otherwise the memory used is unbounded.
-func (s *Statistics) Extract() map[flowtrack.Tuple]Counts {
+func (s *Statistics) Extract() map[netlogtype.Connection]netlogtype.Counts {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	m := s.m
-	s.m = make(map[flowtrack.Tuple]Counts)
+	s.m = make(map[netlogtype.Connection]netlogtype.Counts)
 	return m
 }
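
For reference, a minimal usage sketch of the tunstats API after this change.
The drain function, its package, and the packet arguments are hypothetical
names; only the Statistics method names and signatures come from the diff
above:

	// Package tunstatsdemo is a hypothetical example package.
	package tunstatsdemo

	import (
		"log"

		"tailscale.com/net/tunstats"
	)

	// drain feeds two raw IP packets into the counters and then drains
	// the per-connection map.
	func drain(stats *tunstats.Statistics, txPacket, rxPacket []byte) {
		// Update the per-connection counters from raw IP packets.
		stats.UpdateTx(txPacket) // packet sent by this node
		stats.UpdateRx(rxPacket) // packet received by this node

		// Extract (and reset) the counters; callers must do this
		// periodically so the internal map stays bounded.
		for conn, cnt := range stats.Extract() {
			log.Printf("%v: tx=%d pkts/%d bytes, rx=%d pkts/%d bytes",
				conn, cnt.TxPackets, cnt.TxBytes, cnt.RxPackets, cnt.RxBytes)
		}
	}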

View File

@@ -15,8 +15,8 @@ import (
 	"time"
 	qt "github.com/frankban/quicktest"
-	"tailscale.com/net/flowtrack"
 	"tailscale.com/types/ipproto"
+	"tailscale.com/types/netlogtype"
 )
 func testPacketV4(proto ipproto.Proto, srcAddr, dstAddr [4]byte, srcPort, dstPort, size uint16) (out []byte) {
@@ -48,17 +48,17 @@ func TestConcurrent(t *testing.T) {
 	c := qt.New(t)
 	var stats Statistics
-	var wants []map[flowtrack.Tuple]Counts
-	gots := make([]map[flowtrack.Tuple]Counts, runtime.NumCPU())
+	var wants []map[netlogtype.Connection]netlogtype.Counts
+	gots := make([]map[netlogtype.Connection]netlogtype.Counts, runtime.NumCPU())
 	var group sync.WaitGroup
 	for i := range gots {
 		group.Add(1)
 		go func(i int) {
 			defer group.Done()
-			gots[i] = make(map[flowtrack.Tuple]Counts)
+			gots[i] = make(map[netlogtype.Connection]netlogtype.Counts)
 			rn := rand.New(rand.NewSource(time.Now().UnixNano()))
 			var p []byte
-			var t flowtrack.Tuple
+			var t netlogtype.Connection
 			for j := 0; j < 1000; j++ {
 				delay := rn.Intn(10000)
 				if p == nil || rn.Intn(64) == 0 {
@@ -72,7 +72,7 @@ func TestConcurrent(t *testing.T) {
 					dstPort := uint16(rand.Intn(16))
 					size := uint16(64 + rand.Intn(1024))
 					p = testPacketV4(proto, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size)
-					t = flowtrack.Tuple{Proto: proto, Src: netip.AddrPortFrom(srcAddr, srcPort), Dst: netip.AddrPortFrom(dstAddr, dstPort)}
+					t = netlogtype.Connection{Proto: proto, Src: netip.AddrPortFrom(srcAddr, srcPort), Dst: netip.AddrPortFrom(dstAddr, dstPort)}
 				}
 				t2 := t
 				receive := rn.Intn(2) == 0
@@ -102,17 +102,17 @@ func TestConcurrent(t *testing.T) {
 	group.Wait()
 	wants = append(wants, stats.Extract())
-	got := make(map[flowtrack.Tuple]Counts)
-	want := make(map[flowtrack.Tuple]Counts)
+	got := make(map[netlogtype.Connection]netlogtype.Counts)
+	want := make(map[netlogtype.Connection]netlogtype.Counts)
 	mergeMaps(got, gots...)
 	mergeMaps(want, wants...)
 	c.Assert(got, qt.DeepEquals, want)
 }
-func mergeMaps(dst map[flowtrack.Tuple]Counts, srcs ...map[flowtrack.Tuple]Counts) {
+func mergeMaps(dst map[netlogtype.Connection]netlogtype.Counts, srcs ...map[netlogtype.Connection]netlogtype.Counts) {
 	for _, src := range srcs {
-		for tuple, cnts := range src {
-			dst[tuple] = dst[tuple].Add(cnts)
+		for conn, cnts := range src {
+			dst[conn] = dst[conn].Add(cnts)
 		}
 	}
 }