Mirror of https://github.com/tailscale/tailscale.git (synced 2024-11-25 19:15:34 +00:00)
ipn/{ipnlocal,localapi},wgengine{,/magicsock}: plumb health.Tracker
Down to 25 health.Global users. After this remains controlclient & net/dns & wgengine/router.

Updates #11874
Updates #4136

Change-Id: I6dd1856e3d9bf523bdd44b60fb3b8f7501d5dc0d
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
parent df8f40905b
commit 6d69fc137f
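For context on the pattern this change applies, the sketch below is illustrative only and not code from this commit: the component, Config, and New names are hypothetical, while health.Tracker and SetRouterHealth are taken from the diff. It shows the general shape of accepting a *health.Tracker through a constructor's config and reporting to it, rather than calling the package-level health.Global:

// Illustrative sketch only, not part of this commit. The component, Config,
// Component, and New names are hypothetical; health.Tracker and
// SetRouterHealth are real identifiers that appear in the diff below.
package component

import "tailscale.com/health"

// Config carries a component's dependencies, mirroring the HealthTracker
// fields added to wgengine.Config and magicsock.Options in this commit.
type Config struct {
    // HealthTracker, if non-nil, is the health tracker to report to.
    HealthTracker *health.Tracker
}

// Component stores the tracker it was configured with instead of reaching
// for the package-level health.Global.
type Component struct {
    health *health.Tracker // or nil
}

// New plumbs the tracker from the Config into the component.
func New(conf Config) *Component {
    return &Component{health: conf.HealthTracker}
}

// reconfigure reports its result to the plumbed tracker. Tracker methods
// tolerate a nil receiver (see the (t *Tracker) nil() check added below),
// so callers don't need to guard against an unset tracker.
func (c *Component) reconfigure(err error) {
    c.health.SetRouterHealth(err) // previously: health.Global.SetRouterHealth(err)
}

The diff follows this shape throughout: wgengine.Config and magicsock.Options gain a HealthTracker field, the owning structs store it, and call sites switch from health.Global to the stored tracker.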
@@ -138,6 +138,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
     tailscale.com/types/structs from tailscale.com/ipn+
     tailscale.com/types/tkatype from tailscale.com/client/tailscale+
     tailscale.com/types/views from tailscale.com/ipn+
+    tailscale.com/util/cibuild from tailscale.com/health
     tailscale.com/util/clientmetric from tailscale.com/net/netmon+
     tailscale.com/util/cloudenv from tailscale.com/hostinfo+
 W   tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy
@@ -142,6 +142,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
     tailscale.com/types/structs from tailscale.com/ipn+
     tailscale.com/types/tkatype from tailscale.com/types/key+
     tailscale.com/types/views from tailscale.com/tailcfg+
+    tailscale.com/util/cibuild from tailscale.com/health
     tailscale.com/util/clientmetric from tailscale.com/net/netcheck+
     tailscale.com/util/cloudenv from tailscale.com/net/dnscache+
     tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+
@@ -358,6 +358,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
     tailscale.com/types/structs from tailscale.com/control/controlclient+
     tailscale.com/types/tkatype from tailscale.com/tka+
     tailscale.com/types/views from tailscale.com/ipn/ipnlocal+
+    tailscale.com/util/cibuild from tailscale.com/health
     tailscale.com/util/clientmetric from tailscale.com/control/controlclient+
     tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+
     tailscale.com/util/cmpver from tailscale.com/net/dns+
@@ -651,6 +651,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo
     conf := wgengine.Config{
         ListenPort: args.port,
         NetMon: sys.NetMon.Get(),
+        HealthTracker: sys.HealthTracker(),
         Dialer: sys.Dialer.Get(),
         SetSubsystem: sys.Set,
         ControlKnobs: sys.ControlKnobs(),
@@ -9,6 +9,7 @@
     "errors"
     "fmt"
     "net/http"
+    "os"
     "runtime"
     "sort"
     "sync"
@@ -18,6 +19,7 @@
     "tailscale.com/envknob"
     "tailscale.com/tailcfg"
     "tailscale.com/types/opt"
+    "tailscale.com/util/cibuild"
     "tailscale.com/util/mak"
     "tailscale.com/util/multierr"
     "tailscale.com/util/set"
@@ -152,6 +154,11 @@ func (t *Tracker) nil() bool {
     if t != nil {
         return false
     }
+    if cibuild.On() {
+        stack := make([]byte, 1<<10)
+        stack = stack[:runtime.Stack(stack, false)]
+        fmt.Fprintf(os.Stderr, "## WARNING: (non-fatal) nil health.Tracker (being strict in CI):\n%s\n", stack)
+    }
     // TODO(bradfitz): open source our "unexpected" package
     // and use it here to capture samples of stacks where
     // t is nil.
@@ -327,6 +327,16 @@ type LocalBackend struct {
     outgoingFiles map[string]*ipn.OutgoingFile
 }
 
+// HealthTracker returns the health tracker for the backend.
+func (b *LocalBackend) HealthTracker() *health.Tracker {
+    return b.health
+}
+
+// NetMon returns the network monitor for the backend.
+func (b *LocalBackend) NetMon() *netmon.Monitor {
+    return b.sys.NetMon.Get()
+}
+
 type updateStatus struct {
     started bool
 }
@@ -20,7 +20,6 @@
     "path/filepath"
     "time"
 
-    "tailscale.com/health"
     "tailscale.com/health/healthmsg"
     "tailscale.com/ipn"
     "tailscale.com/ipn/ipnstate"
@@ -59,11 +58,11 @@ type tkaState struct
 // b.mu must be held.
 func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
     if b.tka == nil && !b.capTailnetLock {
-        health.Global.SetTKAHealth(nil)
+        b.health.SetTKAHealth(nil)
         return
     }
     if b.tka == nil {
-        health.Global.SetTKAHealth(nil)
+        b.health.SetTKAHealth(nil)
         return // TKA not enabled.
     }
 
@@ -117,9 +116,9 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
 
     // Check that we ourselves are not locked out, report a health issue if so.
     if nm.SelfNode.Valid() && b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key(), nm.SelfNode.KeySignature().AsSlice()) != nil {
-        health.Global.SetTKAHealth(errors.New(healthmsg.LockedOut))
+        b.health.SetTKAHealth(errors.New(healthmsg.LockedOut))
     } else {
-        health.Global.SetTKAHealth(nil)
+        b.health.SetTKAHealth(nil)
     }
 }
 
@@ -188,7 +187,7 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie
             b.logf("Disablement failed, leaving TKA enabled. Error: %v", err)
         } else {
             isEnabled = false
-            health.Global.SetTKAHealth(nil)
+            b.health.SetTKAHealth(nil)
         }
     } else {
         return fmt.Errorf("[bug] unreachable invariant of wantEnabled w/ isEnabled")
@@ -199,7 +199,7 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) {
     defer onDone()
 
     if strings.HasPrefix(r.URL.Path, "/localapi/") {
-        lah := localapi.NewHandler(lb, s.logf, s.netMon, s.backendLogID)
+        lah := localapi.NewHandler(lb, s.logf, s.backendLogID)
         lah.PermitRead, lah.PermitWrite = s.localAPIPermissions(ci)
         lah.PermitCert = s.connCanFetchCerts(ci)
         lah.ConnIdentity = ci
@@ -140,7 +140,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) {
     }
 
     checkSTUN4 := func(derpNode *tailcfg.DERPNode) {
-        u4, err := nettype.MakePacketListenerWithNetIP(netns.Listener(h.logf, h.netMon)).ListenPacket(ctx, "udp4", ":0")
+        u4, err := nettype.MakePacketListenerWithNetIP(netns.Listener(h.logf, h.b.NetMon())).ListenPacket(ctx, "udp4", ":0")
         if err != nil {
             st.Errors = append(st.Errors, fmt.Sprintf("Error creating IPv4 STUN listener: %v", err))
             return
@@ -249,7 +249,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) {
     serverPubKeys := make(map[key.NodePublic]bool)
     for i := range 5 {
         func() {
-            rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.netMon, func() *tailcfg.DERPRegion {
+            rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion {
                 return &tailcfg.DERPRegion{
                     RegionID: reg.RegionID,
                     RegionCode: reg.RegionCode,
@@ -36,7 +36,6 @@
     "tailscale.com/clientupdate"
     "tailscale.com/drive"
     "tailscale.com/envknob"
-    "tailscale.com/health"
     "tailscale.com/hostinfo"
     "tailscale.com/ipn"
     "tailscale.com/ipn/ipnauth"
@@ -156,8 +155,8 @@
 
 // NewHandler creates a new LocalAPI HTTP handler. All parameters except netMon
 // are required (if non-nil it's used to do faster interface lookups).
-func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, netMon *netmon.Monitor, logID logid.PublicID) *Handler {
-    return &Handler{b: b, logf: logf, netMon: netMon, backendLogID: logID, clock: tstime.StdClock{}}
+func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler {
+    return &Handler{b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}}
 }
 
 type Handler struct {
@@ -188,7 +187,6 @@ type Handler struct {
 
     b *ipnlocal.LocalBackend
     logf logger.Logf
-    netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand
     backendLogID logid.PublicID
     clock tstime.Clock
 }
@@ -358,7 +356,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
     }
     hi, _ := json.Marshal(hostinfo.New())
     h.logf("user bugreport hostinfo: %s", hi)
-    if err := health.Global.OverallError(); err != nil {
+    if err := h.b.HealthTracker().OverallError(); err != nil {
         h.logf("user bugreport health: %s", err.Error())
     } else {
         h.logf("user bugreport health: ok")
@@ -748,7 +746,7 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) {
     done := make(chan bool, 1)
 
     var c *portmapper.Client
-    c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), h.netMon, debugKnobs, h.b.ControlKnobs(), func() {
+    c = portmapper.NewClient(logger.WithPrefix(logf, "portmapper: "), h.b.NetMon(), debugKnobs, h.b.ControlKnobs(), func() {
         logf("portmapping changed.")
         logf("have mapping: %v", c.HaveMapping())
 
@@ -233,7 +233,7 @@ func (s *Server) Loopback() (addr string, proxyCred, localAPICred string, err er
     // out the CONNECT code from tailscaled/proxy.go that uses
     // httputil.ReverseProxy and adding auth support.
     go func() {
-        lah := localapi.NewHandler(s.lb, s.logf, s.netMon, s.logid)
+        lah := localapi.NewHandler(s.lb, s.logf, s.logid)
         lah.PermitWrite = true
         lah.PermitRead = true
         lah.RequiredPassword = s.localAPICred
@@ -517,11 +517,12 @@ func (s *Server) start() (reterr error) {
     sys := new(tsd.System)
     s.dialer = &tsdial.Dialer{Logf: logf} // mutated below (before used)
     eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{
         ListenPort: s.Port,
         NetMon: s.netMon,
         Dialer: s.dialer,
         SetSubsystem: sys.Set,
         ControlKnobs: sys.ControlKnobs(),
+        HealthTracker: sys.HealthTracker(),
     })
     if err != nil {
         return err
@@ -606,7 +607,7 @@ func (s *Server) start() (reterr error) {
     go s.printAuthURLLoop()
 
     // Run the localapi handler, to allow fetching LetsEncrypt certs.
-    lah := localapi.NewHandler(lb, logf, s.netMon, s.logid)
+    lah := localapi.NewHandler(lb, logf, s.logid)
     lah.PermitWrite = true
     lah.PermitRead = true
 
@@ -165,7 +165,7 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int)
     if testenv.InTest() && !checkControlHealthDuringNearestDERPInTests {
         connectedToControl = true
     } else {
-        connectedToControl = health.Global.GetInPollNetMap()
+        connectedToControl = c.health.GetInPollNetMap()
     }
     if !connectedToControl {
         c.mu.Lock()
@@ -201,12 +201,12 @@ func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) {
     defer c.mu.Unlock()
     if !c.wantDerpLocked() {
         c.myDerp = 0
-        health.Global.SetMagicSockDERPHome(0, c.homeless)
+        c.health.SetMagicSockDERPHome(0, c.homeless)
         return false
     }
     if c.homeless {
         c.myDerp = 0
-        health.Global.SetMagicSockDERPHome(0, c.homeless)
+        c.health.SetMagicSockDERPHome(0, c.homeless)
         return false
     }
     if derpNum == c.myDerp {
@@ -217,7 +217,7 @@ func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) {
         metricDERPHomeChange.Add(1)
     }
     c.myDerp = derpNum
-    health.Global.SetMagicSockDERPHome(derpNum, c.homeless)
+    c.health.SetMagicSockDERPHome(derpNum, c.homeless)
 
     if c.privateKey.IsZero() {
         // No private key yet, so DERP connections won't come up anyway.
@@ -400,7 +400,7 @@ func (c *Conn) derpWriteChanOfAddr(addr netip.AddrPort, peer key.NodePublic) cha
         }
         return derpMap.Regions[regionID]
     })
-    dc.HealthTracker = health.Global
+    dc.HealthTracker = c.health
 
     dc.SetCanAckPings(true)
     dc.NotePreferred(c.myDerp == regionID)
@@ -526,8 +526,8 @@ func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netip.AddrPort, d
         return n
     }
 
-    defer health.Global.SetDERPRegionConnectedState(regionID, false)
-    defer health.Global.SetDERPRegionHealth(regionID, "")
+    defer c.health.SetDERPRegionConnectedState(regionID, false)
+    defer c.health.SetDERPRegionHealth(regionID, "")
 
     // peerPresent is the set of senders we know are present on this
     // connection, based on messages we've received from the server.
@@ -539,7 +539,7 @@ func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netip.AddrPort, d
     for {
         msg, connGen, err := dc.RecvDetail()
         if err != nil {
-            health.Global.SetDERPRegionConnectedState(regionID, false)
+            c.health.SetDERPRegionConnectedState(regionID, false)
             // Forget that all these peers have routes.
             for peer := range peerPresent {
                 delete(peerPresent, peer)
@@ -577,14 +577,14 @@ func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netip.AddrPort, d
 
         now := time.Now()
         if lastPacketTime.IsZero() || now.Sub(lastPacketTime) > frameReceiveRecordRate {
-            health.Global.NoteDERPRegionReceivedFrame(regionID)
+            c.health.NoteDERPRegionReceivedFrame(regionID)
             lastPacketTime = now
         }
 
         switch m := msg.(type) {
         case derp.ServerInfoMessage:
-            health.Global.SetDERPRegionConnectedState(regionID, true)
-            health.Global.SetDERPRegionHealth(regionID, "") // until declared otherwise
+            c.health.SetDERPRegionConnectedState(regionID, true)
+            c.health.SetDERPRegionHealth(regionID, "") // until declared otherwise
             c.logf("magicsock: derp-%d connected; connGen=%v", regionID, connGen)
             continue
         case derp.ReceivedPacket:
@@ -624,7 +624,7 @@ func (c *Conn) runDerpReader(ctx context.Context, derpFakeAddr netip.AddrPort, d
             }()
             continue
         case derp.HealthMessage:
-            health.Global.SetDERPRegionHealth(regionID, m.Problem)
+            c.health.SetDERPRegionHealth(regionID, m.Problem)
             continue
        case derp.PeerGoneMessage:
            switch m.Reason {
@@ -91,6 +91,7 @@ type Conn struct {
     testOnlyPacketListener nettype.PacketListener
     noteRecvActivity func(key.NodePublic) // or nil, see Options.NoteRecvActivity
     netMon *netmon.Monitor // or nil
+    health *health.Tracker // or nil
     controlKnobs *controlknobs.Knobs // or nil
 
     // ================================================================
@@ -369,9 +370,13 @@ type Options struct {
     NoteRecvActivity func(key.NodePublic)
 
     // NetMon is the network monitor to use.
-    // With one, the portmapper won't be used.
+    // If nil, the portmapper won't be used.
     NetMon *netmon.Monitor
 
+    // HealthTracker optionally specifies the health tracker to
+    // report errors and warnings to.
+    HealthTracker *health.Tracker
+
     // ControlKnobs are the set of control knobs to use.
     // If nil, they're ignored and not updated.
     ControlKnobs *controlknobs.Knobs
@@ -463,6 +468,7 @@ func NewConn(opts Options) (*Conn, error) {
         c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP)
     }
     c.netMon = opts.NetMon
+    c.health = opts.HealthTracker
     c.onPortUpdate = opts.OnPortUpdate
     c.getPeerByKey = opts.PeerByKeyFunc
 
@@ -666,7 +672,7 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) {
         // NOTE(andrew-d): I don't love that we're depending on the
         // health package here, but I'd rather do that and not store
         // the exact same state in two different places.
-        GetLastDERPActivity: health.Global.GetDERPRegionReceivedTime,
+        GetLastDERPActivity: c.health.GetDERPRegionReceivedTime,
     })
     if err != nil {
         return nil, err
@@ -2471,7 +2477,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur
         }
         ruc.setConnLocked(pconn, network, c.bind.BatchSize())
         if network == "udp4" {
-            health.Global.SetUDP4Unbound(false)
+            c.health.SetUDP4Unbound(false)
         }
         return nil
     }
@@ -2482,7 +2488,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur
     // we get a link change and we can try binding again.
     ruc.setConnLocked(newBlockForeverConn(), "", c.bind.BatchSize())
     if network == "udp4" {
-        health.Global.SetUDP4Unbound(true)
+        c.health.SetUDP4Unbound(true)
     }
     return fmt.Errorf("failed to bind any ports (tried %v)", ports)
 }
@@ -3113,21 +3113,23 @@ func TestMaybeSetNearestDERP(t *testing.T) {
     }
     for _, tt := range testCases {
         t.Run(tt.name, func(t *testing.T) {
+            ht := new(health.Tracker)
             c := newConn()
             c.logf = t.Logf
             c.myDerp = tt.old
             c.derpMap = derpMap
+            c.health = ht
 
             report := &netcheck.Report{PreferredDERP: tt.reportDERP}
 
-            oldConnected := health.Global.GetInPollNetMap()
+            oldConnected := ht.GetInPollNetMap()
             if tt.connectedToControl != oldConnected {
                 if tt.connectedToControl {
-                    health.Global.GotStreamedMapResponse()
-                    t.Cleanup(health.Global.SetOutOfPollNetMap)
+                    ht.GotStreamedMapResponse()
+                    t.Cleanup(ht.SetOutOfPollNetMap)
                 } else {
-                    health.Global.SetOutOfPollNetMap()
-                    t.Cleanup(health.Global.GotStreamedMapResponse)
+                    ht.SetOutOfPollNetMap()
+                    t.Cleanup(ht.GotStreamedMapResponse)
                 }
             }
 
@@ -98,6 +98,7 @@ type userspaceEngine struct {
     dns *dns.Manager
     magicConn *magicsock.Conn
     netMon *netmon.Monitor
+    health *health.Tracker
     netMonOwned bool // whether we created netMon (and thus need to close it)
     netMonUnregister func() // unsubscribes from changes; used regardless of netMonOwned
     birdClient BIRDClient // or nil
@@ -188,6 +189,9 @@ type Config struct {
     // If nil, a new network monitor is created.
     NetMon *netmon.Monitor
 
+    // HealthTracker, if non-nil, is the health tracker to use.
+    HealthTracker *health.Tracker
+
     // Dialer is the dialer to use for outbound connections.
     // If nil, a new Dialer is created
     Dialer *tsdial.Dialer
@@ -310,6 +314,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error)
         birdClient: conf.BIRDClient,
         controlKnobs: conf.ControlKnobs,
         reconfigureVPN: conf.ReconfigureVPN,
+        health: conf.HealthTracker,
     }
 
     if e.birdClient != nil {
@@ -372,6 +377,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error)
         IdleFunc: e.tundev.IdleDuration,
         NoteRecvActivity: e.noteRecvActivity,
         NetMon: e.netMon,
+        HealthTracker: e.health,
         ControlKnobs: conf.ControlKnobs,
         OnPortUpdate: onPortUpdate,
         PeerByKeyFunc: e.PeerByKey,
@@ -970,7 +976,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config,
     e.logf("wgengine: Reconfig: configuring router")
     e.networkLogger.ReconfigRoutes(routerCfg)
     err := e.router.Set(routerCfg)
-    health.Global.SetRouterHealth(err)
+    e.health.SetRouterHealth(err)
     if err != nil {
         return err
     }
@@ -979,7 +985,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config,
     // assigned address.
     e.logf("wgengine: Reconfig: configuring DNS")
     err = e.dns.Set(*dnsCfg)
-    health.Global.SetDNSHealth(err)
+    e.health.SetDNSHealth(err)
     if err != nil {
         return err
     }
@@ -1183,7 +1189,7 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) {
         e.logf("[v1] LinkChange: minor")
     }
 
-    health.Global.SetAnyInterfaceUp(up)
+    e.health.SetAnyInterfaceUp(up)
     e.magicConn.SetNetworkUp(up)
     if !up || changed {
         if err := e.dns.FlushCaches(); err != nil {