ipn: use tstime (#8597)

Updates #8587

Signed-off-by: Claire Wang <claire@tailscale.com>

parent c1ecae13ab
commit 2315bf246a
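The change below follows one pattern throughout: code stops calling time.Now (and time.NewTimer/NewTicker/AfterFunc) directly and instead goes through an injected tstime.Clock, which defaults to tstime.StdClock{} in production and can be replaced with a fake clock from tstest in tests. As a rough illustration only (expiryChecker is a hypothetical stand-in, not a type from this commit), the pattern looks like this:

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstest"
	"tailscale.com/tstime"
)

// expiryChecker is a hypothetical stand-in for the real types touched by
// this commit (expiryManager, LocalBackend, Handler): it reads time only
// through an injected clock.
type expiryChecker struct {
	clock tstime.Clock
}

// expired reports whether deadline has passed according to the injected
// clock rather than the process wall clock.
func (c *expiryChecker) expired(deadline time.Time) bool {
	return deadline.Before(c.clock.Now())
}

func main() {
	// Production wiring: the real clock, as NewLocalBackend does below.
	prod := &expiryChecker{clock: tstime.StdClock{}}
	fmt.Println(prod.expired(time.Now().Add(time.Hour))) // false

	// Test wiring: a clock pinned to a fixed start, as the expiry tests below do.
	start := time.Date(2023, 7, 1, 0, 0, 0, 0, time.UTC)
	fake := &expiryChecker{clock: tstest.NewClock(tstest.ClockOpts{Start: start})}
	fmt.Println(fake.expired(start.Add(-time.Minute))) // true
}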
@@ -61,7 +61,7 @@ func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
 		if secs == 0 {
 			secs -= 1
 		}
-		until := time.Now().Add(time.Duration(secs) * time.Second)
+		until := b.clock.Now().Add(time.Duration(secs) * time.Second)
 		err := b.SetComponentDebugLogging(component, until)
 		var res struct {
 			Error string `json:",omitempty"`
@@ -93,7 +93,7 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string, syncRenewa
 		return nil, errors.New("invalid domain")
 	}
 	logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
-	now := time.Now()
+	now := b.clock.Now()
 	traceACME := func(v any) {
 		if !acmeDebug() {
 			return
@@ -8,6 +8,7 @@ import (
 
 	"tailscale.com/syncs"
 	"tailscale.com/tailcfg"
+	"tailscale.com/tstime"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/netmap"
@@ -37,22 +38,22 @@ type expiryManager struct {
 	// time.Now().Add(clockDelta) == MapResponse.ControlTime
 	clockDelta syncs.AtomicValue[time.Duration]
 
-	logf    logger.Logf
-	timeNow func() time.Time
+	logf  logger.Logf
+	clock tstime.Clock
 }
 
 func newExpiryManager(logf logger.Logf) *expiryManager {
 	return &expiryManager{
 		previouslyExpired: map[tailcfg.StableNodeID]bool{},
 		logf:              logf,
-		timeNow:           time.Now,
+		clock:             tstime.StdClock{},
 	}
 }
 
 // onControlTime is called whenever we receive a new timestamp from the control
 // server to store the delta.
 func (em *expiryManager) onControlTime(t time.Time) {
-	localNow := em.timeNow()
+	localNow := em.clock.Now()
 	delta := t.Sub(localNow)
 	if delta.Abs() > minClockDelta {
 		em.logf("[v1] netmap: flagExpiredPeers: setting clock delta to %v", delta)
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"tailscale.com/tailcfg"
+	"tailscale.com/tstest"
 	"tailscale.com/types/key"
 	"tailscale.com/types/netmap"
 )
@@ -110,8 +111,7 @@ func TestFlagExpiredPeers(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			em := newExpiryManager(t.Logf)
-			em.timeNow = func() time.Time { return now }
-
+			em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 			if tt.controlTime != nil {
 				em.onControlTime(*tt.controlTime)
 			}
@@ -241,7 +241,7 @@ func TestNextPeerExpiry(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			em := newExpiryManager(t.Logf)
-			em.timeNow = func() time.Time { return now }
+			em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 			got := em.nextPeerExpiry(tt.netmap, now)
 			if !got.Equal(tt.want) {
 				t.Errorf("got %q, want %q", got.Format(time.RFC3339), tt.want.Format(time.RFC3339))
@@ -254,7 +254,7 @@ func TestNextPeerExpiry(t *testing.T) {
 	t.Run("ClockSkew", func(t *testing.T) {
 		t.Logf("local time: %q", now.Format(time.RFC3339))
 		em := newExpiryManager(t.Logf)
-		em.timeNow = func() time.Time { return now }
+		em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 
 		// The local clock is "running fast"; our clock skew is -2h
 		em.clockDelta.Store(-2 * time.Hour)
@@ -60,6 +60,7 @@ import (
 	"tailscale.com/tailcfg"
 	"tailscale.com/tka"
 	"tailscale.com/tsd"
+	"tailscale.com/tstime"
 	"tailscale.com/types/dnstype"
 	"tailscale.com/types/empty"
 	"tailscale.com/types/key"
@@ -201,7 +202,7 @@ type LocalBackend struct {
 	hostinfo *tailcfg.Hostinfo
 	// netMap is not mutated in-place once set.
 	netMap *netmap.NetworkMap
-	nmExpiryTimer *time.Timer // for updating netMap on node expiry; can be nil
+	nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil
 	nodeByAddr map[netip.Addr]*tailcfg.Node
 	activeLogin string // last logged LoginName from netMap
 	engineStatus ipn.EngineStatus
@@ -259,6 +260,7 @@ type LocalBackend struct {
 	// tkaSyncLock MUST be taken before mu (or inversely, mu must not be held
 	// at the moment that tkaSyncLock is taken).
 	tkaSyncLock sync.Mutex
+	clock tstime.Clock
 }
 
 // clientGen is a func that creates a control plane client.
@@ -293,13 +295,14 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 
 	ctx, cancel := context.WithCancel(context.Background())
 	portpoll := new(portlist.Poller)
+	clock := tstime.StdClock{}
 
 	b := &LocalBackend{
 		ctx: ctx,
 		ctxCancel: cancel,
 		logf: logf,
-		keyLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
-		statsLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
+		keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
+		statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
 		sys: sys,
 		e: e,
 		dialer: dialer,
@@ -311,6 +314,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 		em: newExpiryManager(logf),
 		gotPortPollRes: make(chan struct{}),
 		loginFlags: loginFlags,
+		clock: clock,
 	}
 
 	netMon := sys.NetMon.Get()
@@ -348,7 +352,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 	for _, component := range debuggableComponents {
 		key := componentStateKey(component)
 		if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil {
-			if until := time.Unix(ut, 0); until.After(time.Now()) {
+			if until := time.Unix(ut, 0); until.After(b.clock.Now()) {
 				// conditional to avoid log spam at start when off
 				b.SetComponentDebugLogging(component, until)
 			}
@@ -360,7 +364,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 
 type componentLogState struct {
 	until time.Time
-	timer *time.Timer // if non-nil, the AfterFunc to disable it
+	timer tstime.TimerController // if non-nil, the AfterFunc to disable it
 }
 
 var debuggableComponents = []string{
@@ -413,7 +417,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim
 		return t.Unix()
 	}
 	ipn.PutStoreInt(b.store, componentStateKey(component), timeUnixOrZero(until))
-	now := time.Now()
+	now := b.clock.Now()
 	on := now.Before(until)
 	setEnabled(on)
 	var onFor time.Duration
@@ -428,7 +432,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim
 	}
 	newSt := componentLogState{until: until}
 	if on {
-		newSt.timer = time.AfterFunc(onFor, func() {
+		newSt.timer = b.clock.AfterFunc(onFor, func() {
 			// Turn off logging after the timer fires, as long as the state is
 			// unchanged when the timer actually fires.
 			b.mu.Lock()
@@ -450,7 +454,7 @@ func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	now := time.Now()
+	now := b.clock.Now()
 	ls := b.componentLogUntil[component]
 	if ls.until.IsZero() || ls.until.Before(now) {
 		return time.Time{}
@@ -877,7 +881,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 
 	// Handle node expiry in the netmap
 	if st.NetMap != nil {
-		now := time.Now()
+		now := b.clock.Now()
 		b.em.flagExpiredPeers(st.NetMap, now)
 
 		// Always stop the existing netmap timer if we have a netmap;
@@ -897,7 +901,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 		nextExpiry := b.em.nextPeerExpiry(st.NetMap, now)
 		if !nextExpiry.IsZero() {
 			tmrDuration := nextExpiry.Sub(now) + 10*time.Second
-			b.nmExpiryTimer = time.AfterFunc(tmrDuration, func() {
+			b.nmExpiryTimer = b.clock.AfterFunc(tmrDuration, func() {
 				// Skip if the world has moved on past the
 				// saved call (e.g. if we race stopping this
 				// timer).
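Where the old code stored a *time.Timer created by time.AfterFunc (nmExpiryTimer above, componentLogState.timer earlier), the new code stores the tstime.TimerController returned by clock.AfterFunc, so a fake clock can own the callback scheduling too. A minimal sketch of that shape, assuming TimerController exposes Stop() as the *time.Timer it replaces did (debouncer and its fields are illustrative, not from the commit):

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

// debouncer is an illustrative type: it arms a callback for later and can
// cancel or re-arm it, holding the controller rather than a *time.Timer.
type debouncer struct {
	clock tstime.Clock
	timer tstime.TimerController // nil until armed
}

func (d *debouncer) arm(after time.Duration, fn func()) {
	if d.timer != nil {
		d.timer.Stop() // cancel any previously armed callback
	}
	d.timer = d.clock.AfterFunc(after, fn)
}

func main() {
	d := &debouncer{clock: tstime.StdClock{}}
	d.arm(10*time.Millisecond, func() { fmt.Println("fired") })
	d.arm(10*time.Millisecond, func() { fmt.Println("fired (rearmed)") })
	time.Sleep(50 * time.Millisecond) // only the rearmed callback fires
}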
@@ -919,7 +923,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 	keyExpiryExtended := false
 	if st.NetMap != nil {
 		wasExpired := b.keyExpired
-		isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(time.Now())
+		isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(b.clock.Now())
 		if wasExpired && !isExpired {
 			keyExpiryExtended = true
 		}
@@ -1380,13 +1384,13 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
 			// prevent it from restarting our map poll
 			// HTTP request (via doSetHostinfoFilterServices >
 			// cli.SetHostinfo). In practice this is very quick.
-			t0 := time.Now()
-			timer := time.NewTimer(time.Second)
+			t0 := b.clock.Now()
+			timer, timerChannel := b.clock.NewTimer(time.Second)
 			select {
 			case <-b.gotPortPollRes:
-				b.logf("[v1] got initial portlist info in %v", time.Since(t0).Round(time.Millisecond))
+				b.logf("[v1] got initial portlist info in %v", b.clock.Since(t0).Round(time.Millisecond))
 				timer.Stop()
-			case <-timer.C:
+			case <-timerChannel:
 				b.logf("timeout waiting for initial portlist")
 			}
 		})
@@ -1809,13 +1813,13 @@ func dnsMapsEqual(new, old *netmap.NetworkMap) bool {
 // b.portpoll and propagates them into the controlclient's HostInfo.
 func (b *LocalBackend) readPoller() {
 	isFirst := true
-	ticker := time.NewTicker(portlist.PollInterval())
+	ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval())
 	defer ticker.Stop()
 	initChan := make(chan struct{})
 	close(initChan)
 	for {
 		select {
-		case <-ticker.C:
+		case <-tickerChannel:
 		case <-b.ctx.Done():
 			return
 		case <-initChan:
@@ -1984,11 +1988,11 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
 // pollRequestEngineStatus calls b.RequestEngineStatus every 2 seconds until ctx
 // is done.
 func (b *LocalBackend) pollRequestEngineStatus(ctx context.Context) {
-	ticker := time.NewTicker(2 * time.Second)
+	ticker, tickerChannel := b.clock.NewTicker(2 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
-		case <-ticker.C:
+		case <-tickerChannel:
 			b.RequestEngineStatus()
 		case <-ctx.Done():
 			return
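Plain time.NewTimer and time.NewTicker expose their channel as a struct field (timer.C, ticker.C), which a substituted clock cannot control; the tstime form used above returns the controller and the receive channel as two values, which is why the select arms change from <-timer.C to <-timerChannel. A small sketch of the same shape (waitOrTimeout is illustrative, not a function from the commit):

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

// waitOrTimeout blocks until done is closed or the clock's timer fires,
// mirroring how Start waits for the first portlist result above.
func waitOrTimeout(clock tstime.Clock, done <-chan struct{}) {
	timer, timerChannel := clock.NewTimer(time.Second) // controller + channel
	defer timer.Stop()
	select {
	case <-done:
		fmt.Println("done before the timeout")
	case <-timerChannel:
		fmt.Println("timed out")
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	waitOrTimeout(tstime.StdClock{}, done) // prints "done before the timeout"
}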
@@ -2398,12 +2402,12 @@ func (b *LocalBackend) StartLoginInteractive() {
 
 func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg.PingType) (*ipnstate.PingResult, error) {
 	if pingType == tailcfg.PingPeerAPI {
-		t0 := time.Now()
+		t0 := b.clock.Now()
 		node, base, err := b.pingPeerAPI(ctx, ip)
 		if err != nil && ctx.Err() != nil {
 			return nil, ctx.Err()
 		}
-		d := time.Since(t0)
+		d := b.clock.Since(t0)
 		pr := &ipnstate.PingResult{
 			IP: ip.String(),
 			NodeIP: ip.String(),
@@ -4774,7 +4778,7 @@ func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) {
 	// opting-out of rate limits. Limit ourselves to at most one message
 	// per 20ms and a burst of 60 log lines, which should be fast enough to
 	// not block for too long but slow enough that we can upload all lines.
-	logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, time.Now)
+	logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now)
 
 	var checks []doctor.Check
 	checks = append(checks,
@@ -304,7 +304,7 @@ func (s *peerAPIServer) DeleteFile(baseName string) error {
 	}
 	var bo *backoff.Backoff
 	logf := s.b.logf
-	t0 := time.Now()
+	t0 := s.b.clock.Now()
 	for {
 		err := os.Remove(path)
 		if err != nil && !os.IsNotExist(err) {
@@ -323,7 +323,7 @@ func (s *peerAPIServer) DeleteFile(baseName string) error {
 		if bo == nil {
 			bo = backoff.NewBackoff("delete-retry", logf, 1*time.Second)
 		}
-		if time.Since(t0) < 5*time.Second {
+		if s.b.clock.Since(t0) < 5*time.Second {
 			bo.BackOff(context.Background(), err)
 			continue
 		}
@@ -1000,7 +1000,7 @@ func (f *incomingFile) Write(p []byte) (n int, err error) {
 	f.mu.Lock()
 	defer f.mu.Unlock()
 	f.copied += int64(n)
-	now := time.Now()
+	now := b.clock.Now()
 	if f.lastNotify.IsZero() || now.Sub(f.lastNotify) > time.Second {
 		f.lastNotify = now
 		needNotify = true
@@ -1113,7 +1113,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "bad filename", 400)
 		return
 	}
-	t0 := time.Now()
+	t0 := h.ps.b.clock.Now()
 	// TODO(bradfitz): prevent same filename being sent by two peers at once
 	partialFile := dstFile + partialSuffix
 	f, err := os.Create(partialFile)
@@ -1133,7 +1133,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 	if r.ContentLength != 0 {
 		inFile = &incomingFile{
 			name: baseName,
-			started: time.Now(),
+			started: h.ps.b.clock.Now(),
 			size: r.ContentLength,
 			w: f,
 			ph: h,
@@ -1171,7 +1171,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	d := time.Since(t0).Round(time.Second / 10)
+	d := h.ps.b.clock.Since(t0).Round(time.Second / 10)
 	h.logf("got put of %s in %v from %v/%v", approxSize(finalSize), d, h.remoteAddr.Addr(), h.peerNode.ComputedName)
 
 	// TODO: set modtime
@@ -457,6 +457,7 @@ func TestHandlePeerAPI(t *testing.T) {
 			logf: e.logBuf.Logf,
 			capFileSharing: tt.capSharing,
 			netMap: &netmap.NetworkMap{SelfNode: selfNode},
+			clock: &tstest.Clock{},
 		}
 		e.ph = &peerAPIHandler{
 			isSelf: tt.isSelf,
@@ -506,6 +507,7 @@ func TestFileDeleteRace(t *testing.T) {
 		b: &LocalBackend{
 			logf: t.Logf,
 			capFileSharing: true,
+			clock: &tstest.Clock{},
 		},
 		rootDir: dir,
 	}
@@ -39,6 +39,7 @@ import (
 	"tailscale.com/net/portmapper"
 	"tailscale.com/tailcfg"
 	"tailscale.com/tka"
+	"tailscale.com/tstime"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/logid"
@@ -129,7 +130,7 @@ var (
 // NewHandler creates a new LocalAPI HTTP handler. All parameters except netMon
 // are required (if non-nil it's used to do faster interface lookups).
 func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, netMon *netmon.Monitor, logID logid.PublicID) *Handler {
-	return &Handler{b: b, logf: logf, netMon: netMon, backendLogID: logID}
+	return &Handler{b: b, logf: logf, netMon: netMon, backendLogID: logID, clock: tstime.StdClock{}}
 }
 
 type Handler struct {
@@ -155,6 +156,7 @@ type Handler struct {
 	logf logger.Logf
 	netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand
 	backendLogID logid.PublicID
+	clock tstime.Clock
 }
 
 func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -309,7 +311,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
 	defer h.b.TryFlushLogs() // kick off upload after bugreport's done logging
 
 	logMarker := func() string {
-		return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, time.Now().UTC().Format("20060102150405Z"), randHex(8))
+		return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, h.clock.Now().UTC().Format("20060102150405Z"), randHex(8))
 	}
 	if envknob.NoLogsNoSupport() {
 		logMarker = func() string { return "BUG-NO-LOGS-NO-SUPPORT-this-node-has-had-its-logging-disabled" }
@@ -355,7 +357,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	until := time.Now().Add(12 * time.Hour)
+	until := h.clock.Now().Add(12 * time.Hour)
 
 	var changed map[string]bool
 	for _, component := range []string{"magicsock"} {
|
||||
}
|
||||
component := r.FormValue("component")
|
||||
secs, _ := strconv.Atoi(r.FormValue("secs"))
|
||||
err := h.b.SetComponentDebugLogging(component, time.Now().Add(time.Duration(secs)*time.Second))
|
||||
err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second))
|
||||
var res struct {
|
||||
Error string
|
||||
}
|
||||
@@ -1887,7 +1889,7 @@ func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) {
 	// opting-out of rate limits. Limit ourselves to at most one message
 	// per 20ms and a burst of 60 log lines, which should be fast enough to
 	// not block for too long but slow enough that we can upload all lines.
-	logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, time.Now)
+	logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now)
 
 	for _, line := range logRequest.Lines {
 		logf("%s", line)