mirror of https://github.com/tailscale/tailscale.git, synced 2024-11-30 05:25:35 +00:00
21509db121
I saw some panics in CI, like:

2024-05-08T04:30:25.9553518Z ## WARNING: (non-fatal) nil health.Tracker (being strict in CI):
2024-05-08T04:30:25.9554043Z goroutine 801 [running]:
2024-05-08T04:30:25.9554489Z tailscale.com/health.(*Tracker).nil(0x0)
2024-05-08T04:30:25.9555086Z 	tailscale.com/health/health.go:185 +0x70
2024-05-08T04:30:25.9555688Z tailscale.com/health.(*Tracker).SetUDP4Unbound(0x0, 0x0)
2024-05-08T04:30:25.9556373Z 	tailscale.com/health/health.go:532 +0x2f
2024-05-08T04:30:25.9557296Z tailscale.com/wgengine/magicsock.(*Conn).bindSocket(0xc0003b4808, 0xc0003b4878, {0x1fbca53, 0x4}, 0x0)
2024-05-08T04:30:25.9558301Z 	tailscale.com/wgengine/magicsock/magicsock.go:2481 +0x12c5
2024-05-08T04:30:25.9559026Z tailscale.com/wgengine/magicsock.(*Conn).rebind(0xc0003b4808, 0x0)
2024-05-08T04:30:25.9559874Z 	tailscale.com/wgengine/magicsock/magicsock.go:2510 +0x16f
2024-05-08T04:30:25.9561038Z tailscale.com/wgengine/magicsock.NewConn({0xc000063c80, 0x0, 0xc000197930, 0xc000197950, 0xc000197960, {0x0, 0x0}, 0xc000197970, 0xc000198ee0, 0x0, ...})
2024-05-08T04:30:25.9562402Z 	tailscale.com/wgengine/magicsock/magicsock.go:476 +0xd5f
2024-05-08T04:30:25.9563779Z tailscale.com/wgengine.NewUserspaceEngine(0xc000063c80, {{0x22c8750, 0xc0001976b0}, 0x0, {0x22c3210, 0xc000063c80}, {0x22c31d8, 0x2d3c900}, 0x0, 0x0, ...})
2024-05-08T04:30:25.9564982Z 	tailscale.com/wgengine/userspace.go:389 +0x159d
2024-05-08T04:30:25.9565529Z tailscale.com/ipn/ipnlocal.newTestBackend(0xc000358b60)
2024-05-08T04:30:25.9566086Z 	tailscale.com/ipn/ipnlocal/serve_test.go:675 +0x2a5
2024-05-08T04:30:25.9566612Z ta

Updates #11874

Change-Id: I3432ed52d670743e532be4642f38dbd6e3763b1b
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
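To make the failure mode concrete: the trace shows a method being invoked on a nil *health.Tracker, which the tracker tolerates with a loud warning rather than a crash. Below is a minimal, hypothetical sketch of that nil-receiver guard pattern; the type, the field, and the isNil name are illustrative stand-ins, not Tailscale's actual health package.

package main

import "fmt"

// Tracker is an illustrative stand-in for health.Tracker.
type Tracker struct {
	udp4Unbound bool
}

// isNil reports whether t is nil, warning instead of panicking,
// in the spirit of the (*Tracker).nil frame in the trace above.
func (t *Tracker) isNil() bool {
	if t == nil {
		fmt.Println("## WARNING: (non-fatal) nil Tracker")
		return true
	}
	return false
}

// SetUDP4Unbound records the UDP4 bind state; safe on a nil receiver.
func (t *Tracker) SetUDP4Unbound(unbound bool) {
	if t.isNil() {
		return
	}
	t.udp4Unbound = unbound
}

func main() {
	var tr *Tracker         // nil, like the Tracker the failing tests never set up
	tr.SetUDP4Unbound(true) // no panic: the guard catches the nil receiver
}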
115 lines
2.9 KiB
Go
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipnlocal

import (
	"reflect"
	"testing"
	"time"

	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/tailcfg"
	"tailscale.com/tsd"
	"tailscale.com/tstest"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/logid"
	"tailscale.com/types/persist"
	"tailscale.com/wgengine"
)

// TestLocalLogLines tests that the log lines required for log parsing are
// emitted by the expected functions. Update these tests if moving log lines
// between functions.
func TestLocalLogLines(t *testing.T) {
	logListen := tstest.NewLogLineTracker(t.Logf, []string{
		"[v1] peer keys: %s",
		"[v1] v%v peers: %v",
	})
	defer logListen.Close()

	// Put a rate-limiter with a burst of 0 between the components below.
	// This instructs the rate-limiter to drop all logging that isn't
	// explicitly exempt from rate-limiting, which lets the logListen
	// tracker verify that the rate-limiter allows these key lines through.
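	// (Arguments: 5*time.Second rate-limit window, burst 0, at most 10
	// tracked format strings, and time.Now as the clock.)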
	logf := logger.RateLimitedFnWithClock(logListen.Logf, 5*time.Second, 0, 10, time.Now)

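	// logid builds a dummy logid.PublicID with every byte set to the
	// given value; the test only needs a stable, non-empty ID.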
	logid := func(hex byte) logid.PublicID {
		var ret logid.PublicID
		for i := range len(ret) {
			ret[i] = hex
		}
		return ret
	}
	idA := logid(0xaa)

	// Set up a LocalBackend, super bare bones. No functional data.
	sys := new(tsd.System)
	store := new(mem.Store)
	sys.Set(store)
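	// Passing sys.HealthTracker() gives the engine a non-nil
	// health.Tracker, avoiding the strict nil-Tracker warning in CI
	// described in the commit message above (updates #11874).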
	e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(e.Close)
	sys.Set(e)

	lb, err := NewLocalBackend(logf, idA, sys, 0)
	if err != nil {
		t.Fatal(err)
	}
	defer lb.Shutdown()

	lb.hostinfo = &tailcfg.Hostinfo{}
	// Hacky manual override of the usual log-on-change behaviour of keyLogf.
	lb.keyLogf = logListen.Logf

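	// testWantRemain returns a subtest asserting that exactly the given
	// expected log lines remain unseen by logListen.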
	testWantRemain := func(wantRemain ...string) func(t *testing.T) {
		return func(t *testing.T) {
			t.Helper()
			if remain := logListen.Check(); !reflect.DeepEqual(remain, wantRemain) {
				t.Errorf("remain %q, want %q", remain, wantRemain)
			}
		}
	}

	// Log the prefs line.
	persist := &persist.Persist{}
	prefs := ipn.NewPrefs()
	prefs.Persist = persist
	lb.SetPrefsForTest(prefs)

	t.Run("after_prefs", testWantRemain("[v1] peer keys: %s", "[v1] v%v peers: %v"))

	// Log peers, peer keys.
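	// parseWgStatusLocked requires lb.mu to be held (per its "Locked"
	// suffix), hence the explicit Lock/Unlock pair around each call.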
	lb.mu.Lock()
	lb.parseWgStatusLocked(&wgengine.Status{
		Peers: []ipnstate.PeerStatusLite{{
			TxBytes:       10,
			RxBytes:       10,
			LastHandshake: time.Now(),
			NodeKey:       key.NewNode().Public(),
		}},
	})
	lb.mu.Unlock()

	t.Run("after_peers", testWantRemain())

	// Log it again with different stats to ensure it's not dup-suppressed.
	logListen.Reset()
	lb.mu.Lock()
	lb.parseWgStatusLocked(&wgengine.Status{
		Peers: []ipnstate.PeerStatusLite{{
			TxBytes:       11,
			RxBytes:       12,
			LastHandshake: time.Now(),
			NodeKey:       key.NewNode().Public(),
		}},
	})
	lb.mu.Unlock()
	t.Run("after_second_peer_status", testWantRemain())
}