Mirror of https://github.com/tailscale/tailscale.git, synced 2025-04-16 11:41:39 +00:00

ipn: use tstime (#8597)

Updates #8587
Signed-off-by: Claire Wang <claire@tailscale.com>

parent c1ecae13ab
commit 2315bf246a
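This commit threads a tstime.Clock through LocalBackend, expiryManager, and the LocalAPI Handler, replacing direct calls to time.Now, time.AfterFunc, time.NewTimer, and time.NewTicker so tests can substitute a controllable fake clock. The call sites in the hunks below imply roughly the following interface shape; this is a sketch inferred from this diff, not the actual definition in tailscale.com/tstime:

package sketch

import "time"

// TimerController is the subset of timer behavior these hunks rely on:
// cancelling a pending timer or AfterFunc callback.
type TimerController interface {
	Stop()
}

// TickerController likewise only needs Stop here.
type TickerController interface {
	Stop()
}

// Clock abstracts the time operations used by LocalBackend. Note that
// NewTimer and NewTicker return their firing channel as a second value
// instead of exposing it as a struct field (compare time.Timer.C), which
// lets a fake clock own delivery on that channel.
type Clock interface {
	Now() time.Time
	Since(t time.Time) time.Duration
	AfterFunc(d time.Duration, f func()) TimerController
	NewTimer(d time.Duration) (TimerController, <-chan time.Time)
	NewTicker(d time.Duration) (TickerController, <-chan time.Time)
}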
ipn/ipnlocal/c2n.go
@@ -61,7 +61,7 @@ func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
 	if secs == 0 {
 		secs -= 1
 	}
-	until := time.Now().Add(time.Duration(secs) * time.Second)
+	until := b.clock.Now().Add(time.Duration(secs) * time.Second)
 	err := b.SetComponentDebugLogging(component, until)
 	var res struct {
 		Error string `json:",omitempty"`

ipn/ipnlocal/cert.go
@@ -93,7 +93,7 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string, syncRenewa
 		return nil, errors.New("invalid domain")
 	}
 	logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
-	now := time.Now()
+	now := b.clock.Now()
 	traceACME := func(v any) {
 		if !acmeDebug() {
 			return
ipn/ipnlocal/expiry.go
@@ -8,6 +8,7 @@ import (
 
 	"tailscale.com/syncs"
 	"tailscale.com/tailcfg"
+	"tailscale.com/tstime"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/netmap"

@@ -38,21 +39,21 @@ type expiryManager struct {
 	clockDelta syncs.AtomicValue[time.Duration]
 
 	logf    logger.Logf
-	timeNow func() time.Time
+	clock   tstime.Clock
 }
 
 func newExpiryManager(logf logger.Logf) *expiryManager {
 	return &expiryManager{
 		previouslyExpired: map[tailcfg.StableNodeID]bool{},
 		logf:              logf,
-		timeNow:           time.Now,
+		clock:             tstime.StdClock{},
 	}
 }
 
 // onControlTime is called whenever we receive a new timestamp from the control
 // server to store the delta.
 func (em *expiryManager) onControlTime(t time.Time) {
-	localNow := em.timeNow()
+	localNow := em.clock.Now()
 	delta := t.Sub(localNow)
 	if delta.Abs() > minClockDelta {
 		em.logf("[v1] netmap: flagExpiredPeers: setting clock delta to %v", delta)
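expiryManager previously injected time as a bare func-typed field; the tstime.Clock interface carries timers and tickers along with Now, so a single field covers everything the backend needs. A before/after sketch of the injection style (all other fields elided):

package sketch

import (
	"time"

	"tailscale.com/tstime"
)

// Before: only the current time was swappable in tests.
type expiryManagerBefore struct {
	timeNow func() time.Time
}

// After: Now, Since, AfterFunc, NewTimer, and NewTicker are all swappable
// through one field (tstime.StdClock{} in production, a tstest clock in
// tests).
type expiryManagerAfter struct {
	clock tstime.Clock
}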
ipn/ipnlocal/expiry_test.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"tailscale.com/tailcfg"
+	"tailscale.com/tstest"
 	"tailscale.com/types/key"
 	"tailscale.com/types/netmap"
 )

@@ -110,8 +111,7 @@ func TestFlagExpiredPeers(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			em := newExpiryManager(t.Logf)
-			em.timeNow = func() time.Time { return now }
+			em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 
 			if tt.controlTime != nil {
 				em.onControlTime(*tt.controlTime)
 			}

@@ -241,7 +241,7 @@ func TestNextPeerExpiry(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			em := newExpiryManager(t.Logf)
-			em.timeNow = func() time.Time { return now }
+			em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 			got := em.nextPeerExpiry(tt.netmap, now)
 			if !got.Equal(tt.want) {
 				t.Errorf("got %q, want %q", got.Format(time.RFC3339), tt.want.Format(time.RFC3339))

@@ -254,7 +254,7 @@ func TestNextPeerExpiry(t *testing.T) {
 	t.Run("ClockSkew", func(t *testing.T) {
 		t.Logf("local time: %q", now.Format(time.RFC3339))
 		em := newExpiryManager(t.Logf)
-		em.timeNow = func() time.Time { return now }
+		em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
 
 		// The local clock is "running fast"; our clock skew is -2h
		em.clockDelta.Store(-2 * time.Hour)
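On the test side, the one-line closure override becomes a fake clock pinned to a start time. A minimal sketch of the pattern as these tests use it, assuming the in-package expiryManager from the hunks above:

package ipnlocal

import (
	"testing"
	"time"

	"tailscale.com/tstest"
)

func TestFrozenClockSketch(t *testing.T) {
	now := time.Date(2023, 7, 1, 0, 0, 0, 0, time.UTC) // any fixed instant
	em := newExpiryManager(t.Logf)
	// The fake clock starts at `now` and advances only under test
	// control, so expiry comparisons are deterministic.
	em.clock = tstest.NewClock(tstest.ClockOpts{Start: now})
	if got := em.clock.Now(); !got.Equal(now) {
		t.Errorf("clock.Now() = %v, want %v", got, now)
	}
}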
ipn/ipnlocal/local.go
@@ -60,6 +60,7 @@ import (
 	"tailscale.com/tailcfg"
 	"tailscale.com/tka"
 	"tailscale.com/tsd"
+	"tailscale.com/tstime"
 	"tailscale.com/types/dnstype"
 	"tailscale.com/types/empty"
 	"tailscale.com/types/key"

@@ -201,7 +202,7 @@ type LocalBackend struct {
 	hostinfo *tailcfg.Hostinfo
 	// netMap is not mutated in-place once set.
 	netMap        *netmap.NetworkMap
-	nmExpiryTimer *time.Timer // for updating netMap on node expiry; can be nil
+	nmExpiryTimer tstime.TimerController // for updating netMap on node expiry; can be nil
 	nodeByAddr    map[netip.Addr]*tailcfg.Node
 	activeLogin   string // last logged LoginName from netMap
 	engineStatus  ipn.EngineStatus

@@ -259,6 +260,7 @@ type LocalBackend struct {
 	// tkaSyncLock MUST be taken before mu (or inversely, mu must not be held
 	// at the moment that tkaSyncLock is taken).
 	tkaSyncLock sync.Mutex
+	clock       tstime.Clock
 }
 
 // clientGen is a func that creates a control plane client.

@@ -293,13 +295,14 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 
 	ctx, cancel := context.WithCancel(context.Background())
 	portpoll := new(portlist.Poller)
+	clock := tstime.StdClock{}
 
 	b := &LocalBackend{
 		ctx:       ctx,
 		ctxCancel: cancel,
 		logf:      logf,
-		keyLogf:   logger.LogOnChange(logf, 5*time.Minute, time.Now),
-		statsLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
+		keyLogf:   logger.LogOnChange(logf, 5*time.Minute, clock.Now),
+		statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now),
 		sys:       sys,
 		e:         e,
 		dialer:    dialer,
@@ -311,6 +314,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 		em:             newExpiryManager(logf),
 		gotPortPollRes: make(chan struct{}),
 		loginFlags:     loginFlags,
+		clock:          clock,
 	}
 
 	netMon := sys.NetMon.Get()
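NewLocalBackend constructs the production clock once and shares it: the struct keeps the tstime.Clock, while helpers that only want a func() time.Time (logger.LogOnChange above) receive the bound method value clock.Now. A minimal standalone illustration of the method-value trick:

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

func main() {
	clock := tstime.StdClock{} // real time in production

	// clock.Now is a method value of type func() time.Time, so it drops
	// in anywhere time.Now was previously passed as a value.
	var nowFn func() time.Time = clock.Now
	fmt.Println(nowFn().Round(time.Second))
}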
@@ -348,7 +352,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 	for _, component := range debuggableComponents {
 		key := componentStateKey(component)
 		if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil {
-			if until := time.Unix(ut, 0); until.After(time.Now()) {
+			if until := time.Unix(ut, 0); until.After(b.clock.Now()) {
 				// conditional to avoid log spam at start when off
 				b.SetComponentDebugLogging(component, until)
 			}

@@ -360,7 +364,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
 
 type componentLogState struct {
 	until time.Time
-	timer *time.Timer // if non-nil, the AfterFunc to disable it
+	timer tstime.TimerController // if non-nil, the AfterFunc to disable it
 }
 
 var debuggableComponents = []string{

@@ -413,7 +417,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim
 		return t.Unix()
 	}
 	ipn.PutStoreInt(b.store, componentStateKey(component), timeUnixOrZero(until))
-	now := time.Now()
+	now := b.clock.Now()
 	on := now.Before(until)
 	setEnabled(on)
 	var onFor time.Duration

@@ -428,7 +432,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim
 	}
 	newSt := componentLogState{until: until}
 	if on {
-		newSt.timer = time.AfterFunc(onFor, func() {
+		newSt.timer = b.clock.AfterFunc(onFor, func() {
 			// Turn off logging after the timer fires, as long as the state is
 			// unchanged when the timer actually fires.
 			b.mu.Lock()
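time.AfterFunc becomes b.clock.AfterFunc, whose tstime.TimerController result is stored (componentLogState.timer here, nmExpiryTimer below) so the pending callback can be cancelled later. A minimal sketch of that shape:

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

func main() {
	var clock tstime.Clock = tstime.StdClock{}

	// Like time.AfterFunc, but routed through the injectable clock; the
	// returned controller can cancel the pending callback, and a fake
	// clock can fire it by advancing virtual time.
	timer := clock.AfterFunc(50*time.Millisecond, func() {
		fmt.Println("debug logging disabled")
	})

	timer.Stop() // cancelled before it fires
	time.Sleep(100 * time.Millisecond)
}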
@@ -450,7 +454,7 @@ func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	now := time.Now()
+	now := b.clock.Now()
 	ls := b.componentLogUntil[component]
 	if ls.until.IsZero() || ls.until.Before(now) {
 		return time.Time{}

@@ -877,7 +881,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 
 	// Handle node expiry in the netmap
 	if st.NetMap != nil {
-		now := time.Now()
+		now := b.clock.Now()
 		b.em.flagExpiredPeers(st.NetMap, now)
 
 		// Always stop the existing netmap timer if we have a netmap;

@@ -897,7 +901,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 		nextExpiry := b.em.nextPeerExpiry(st.NetMap, now)
 		if !nextExpiry.IsZero() {
 			tmrDuration := nextExpiry.Sub(now) + 10*time.Second
-			b.nmExpiryTimer = time.AfterFunc(tmrDuration, func() {
+			b.nmExpiryTimer = b.clock.AfterFunc(tmrDuration, func() {
 				// Skip if the world has moved on past the
 				// saved call (e.g. if we race stopping this
 				// timer).

@@ -919,7 +923,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
 	keyExpiryExtended := false
 	if st.NetMap != nil {
 		wasExpired := b.keyExpired
-		isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(time.Now())
+		isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(b.clock.Now())
 		if wasExpired && !isExpired {
 			keyExpiryExtended = true
 		}

@@ -1380,13 +1384,13 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
 		// prevent it from restarting our map poll
 		// HTTP request (via doSetHostinfoFilterServices >
 		// cli.SetHostinfo). In practice this is very quick.
-		t0 := time.Now()
-		timer := time.NewTimer(time.Second)
+		t0 := b.clock.Now()
+		timer, timerChannel := b.clock.NewTimer(time.Second)
 		select {
 		case <-b.gotPortPollRes:
-			b.logf("[v1] got initial portlist info in %v", time.Since(t0).Round(time.Millisecond))
+			b.logf("[v1] got initial portlist info in %v", b.clock.Since(t0).Round(time.Millisecond))
 			timer.Stop()
-		case <-timer.C:
+		case <-timerChannel:
 			b.logf("timeout waiting for initial portlist")
 		}
 	})
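Unlike time.NewTimer, whose channel is the exported timer.C field, b.clock.NewTimer hands back the controller and the receive channel as two values, because a fake clock has to own delivery on that channel. The Start hunk above reduces to this pattern:

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

// waitOrTimeout mirrors the portlist wait in Start: either the work
// finishes first, or the clock-owned timer channel fires.
func waitOrTimeout(clock tstime.Clock, done <-chan struct{}) {
	t0 := clock.Now()
	timer, timerChannel := clock.NewTimer(time.Second)
	select {
	case <-done:
		fmt.Println("done in", clock.Since(t0).Round(time.Millisecond))
		timer.Stop()
	case <-timerChannel:
		fmt.Println("timed out")
	}
}

func main() {
	done := make(chan struct{})
	close(done) // pretend the work already finished
	waitOrTimeout(tstime.StdClock{}, done)
}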
@@ -1809,13 +1813,13 @@ func dnsMapsEqual(new, old *netmap.NetworkMap) bool {
 // b.portpoll and propagates them into the controlclient's HostInfo.
 func (b *LocalBackend) readPoller() {
 	isFirst := true
-	ticker := time.NewTicker(portlist.PollInterval())
+	ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval())
 	defer ticker.Stop()
 	initChan := make(chan struct{})
 	close(initChan)
 	for {
 		select {
-		case <-ticker.C:
+		case <-tickerChannel:
 		case <-b.ctx.Done():
 			return
 		case <-initChan:

@@ -1984,11 +1988,11 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
 // pollRequestEngineStatus calls b.RequestEngineStatus every 2 seconds until ctx
 // is done.
 func (b *LocalBackend) pollRequestEngineStatus(ctx context.Context) {
-	ticker := time.NewTicker(2 * time.Second)
+	ticker, tickerChannel := b.clock.NewTicker(2 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
-		case <-ticker.C:
+		case <-tickerChannel:
 			b.RequestEngineStatus()
 		case <-ctx.Done():
 			return
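The ticker conversion follows the same two-value shape: b.clock.NewTicker replaces time.NewTicker, and the select receives from the returned channel rather than ticker.C, as in readPoller and pollRequestEngineStatus above. A condensed runnable version of that loop:

package main

import (
	"context"
	"fmt"
	"time"

	"tailscale.com/tstime"
)

// pollEvery runs fn on each tick until ctx is done, mirroring
// pollRequestEngineStatus.
func pollEvery(ctx context.Context, clock tstime.Clock, d time.Duration, fn func()) {
	ticker, tickerChannel := clock.NewTicker(d)
	defer ticker.Stop()
	for {
		select {
		case <-tickerChannel:
			fn()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	pollEvery(ctx, tstime.StdClock{}, 100*time.Millisecond, func() { fmt.Println("tick") })
}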
@@ -2398,12 +2402,12 @@ func (b *LocalBackend) StartLoginInteractive() {
 
 func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg.PingType) (*ipnstate.PingResult, error) {
 	if pingType == tailcfg.PingPeerAPI {
-		t0 := time.Now()
+		t0 := b.clock.Now()
 		node, base, err := b.pingPeerAPI(ctx, ip)
 		if err != nil && ctx.Err() != nil {
 			return nil, ctx.Err()
 		}
-		d := time.Since(t0)
+		d := b.clock.Since(t0)
 		pr := &ipnstate.PingResult{
 			IP:     ip.String(),
 			NodeIP: ip.String(),

@@ -4774,7 +4778,7 @@ func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) {
 	// opting-out of rate limits. Limit ourselves to at most one message
 	// per 20ms and a burst of 60 log lines, which should be fast enough to
 	// not block for too long but slow enough that we can upload all lines.
-	logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, time.Now)
+	logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now)
 
 	var checks []doctor.Check
 	checks = append(checks,
ipn/ipnlocal/peerapi.go
@@ -304,7 +304,7 @@ func (s *peerAPIServer) DeleteFile(baseName string) error {
 	}
 	var bo *backoff.Backoff
 	logf := s.b.logf
-	t0 := time.Now()
+	t0 := s.b.clock.Now()
 	for {
 		err := os.Remove(path)
 		if err != nil && !os.IsNotExist(err) {

@@ -323,7 +323,7 @@ func (s *peerAPIServer) DeleteFile(baseName string) error {
 			if bo == nil {
 				bo = backoff.NewBackoff("delete-retry", logf, 1*time.Second)
 			}
-			if time.Since(t0) < 5*time.Second {
+			if s.b.clock.Since(t0) < 5*time.Second {
 				bo.BackOff(context.Background(), err)
 				continue
 			}

@@ -1000,7 +1000,7 @@ func (f *incomingFile) Write(p []byte) (n int, err error) {
 		f.mu.Lock()
 		defer f.mu.Unlock()
 		f.copied += int64(n)
-		now := time.Now()
+		now := b.clock.Now()
 		if f.lastNotify.IsZero() || now.Sub(f.lastNotify) > time.Second {
 			f.lastNotify = now
 			needNotify = true

@@ -1113,7 +1113,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "bad filename", 400)
 		return
 	}
-	t0 := time.Now()
+	t0 := h.ps.b.clock.Now()
 	// TODO(bradfitz): prevent same filename being sent by two peers at once
 	partialFile := dstFile + partialSuffix
 	f, err := os.Create(partialFile)

@@ -1133,7 +1133,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 	if r.ContentLength != 0 {
 		inFile = &incomingFile{
 			name:    baseName,
-			started: time.Now(),
+			started: h.ps.b.clock.Now(),
 			size:    r.ContentLength,
 			w:       f,
 			ph:      h,

@@ -1171,7 +1171,7 @@ func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	d := time.Since(t0).Round(time.Second / 10)
+	d := h.ps.b.clock.Since(t0).Round(time.Second / 10)
 	h.logf("got put of %s in %v from %v/%v", approxSize(finalSize), d, h.remoteAddr.Addr(), h.peerNode.ComputedName)
 
 	// TODO: set modtime
ipn/ipnlocal/peerapi_test.go
@@ -457,6 +457,7 @@ func TestHandlePeerAPI(t *testing.T) {
 			logf:           e.logBuf.Logf,
 			capFileSharing: tt.capSharing,
 			netMap:         &netmap.NetworkMap{SelfNode: selfNode},
+			clock:          &tstest.Clock{},
 		}
 		e.ph = &peerAPIHandler{
 			isSelf: tt.isSelf,

@@ -506,6 +507,7 @@ func TestFileDeleteRace(t *testing.T) {
 		b: &LocalBackend{
 			logf:           t.Logf,
 			capFileSharing: true,
+			clock:          &tstest.Clock{},
 		},
 		rootDir: dir,
 	}
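These tests only need the clock plumbing to exist, not a particular start time, so a zero-value &tstest.Clock{} suffices (contrast the expiry tests above, which pin Start). A sketch, assuming the in-package types:

package ipnlocal

import (
	"testing"

	"tailscale.com/tstest"
)

func TestZeroClockSketch(t *testing.T) {
	b := &LocalBackend{
		logf:  t.Logf,
		clock: &tstest.Clock{}, // deterministic fake; start time irrelevant here
	}
	_ = b
}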
ipn/localapi/localapi.go
@@ -39,6 +39,7 @@ import (
 	"tailscale.com/net/portmapper"
 	"tailscale.com/tailcfg"
 	"tailscale.com/tka"
+	"tailscale.com/tstime"
 	"tailscale.com/types/key"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/logid"

@@ -129,7 +130,7 @@ var (
 // NewHandler creates a new LocalAPI HTTP handler. All parameters except netMon
 // are required (if non-nil it's used to do faster interface lookups).
 func NewHandler(b *ipnlocal.LocalBackend, logf logger.Logf, netMon *netmon.Monitor, logID logid.PublicID) *Handler {
-	return &Handler{b: b, logf: logf, netMon: netMon, backendLogID: logID}
+	return &Handler{b: b, logf: logf, netMon: netMon, backendLogID: logID, clock: tstime.StdClock{}}
 }
 
 type Handler struct {

@@ -155,6 +156,7 @@ type Handler struct {
 	logf         logger.Logf
 	netMon       *netmon.Monitor // optional; nil means interfaces will be looked up on-demand
 	backendLogID logid.PublicID
+	clock        tstime.Clock
 }
 
 func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
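With the clock stored on Handler, time-sensitive output such as the bug-report marker becomes testable by swapping the field after NewHandler. A hypothetical standalone version of the marker helper (the randHex suffix from the real code is omitted):

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

// bugMarker mirrors serveBugReport's marker format; under a fake clock a
// test would get a stable, predictable timestamp.
func bugMarker(clock tstime.Clock, logID string) string {
	return fmt.Sprintf("BUG-%v-%v", logID, clock.Now().UTC().Format("20060102150405Z"))
}

func main() {
	fmt.Println(bugMarker(tstime.StdClock{}, "example-log-id"))
}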
@@ -309,7 +311,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
 	defer h.b.TryFlushLogs() // kick off upload after bugreport's done logging
 
 	logMarker := func() string {
-		return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, time.Now().UTC().Format("20060102150405Z"), randHex(8))
+		return fmt.Sprintf("BUG-%v-%v-%v", h.backendLogID, h.clock.Now().UTC().Format("20060102150405Z"), randHex(8))
 	}
 	if envknob.NoLogsNoSupport() {
 		logMarker = func() string { return "BUG-NO-LOGS-NO-SUPPORT-this-node-has-had-its-logging-disabled" }

@@ -355,7 +357,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	until := time.Now().Add(12 * time.Hour)
+	until := h.clock.Now().Add(12 * time.Hour)
 
 	var changed map[string]bool
 	for _, component := range []string{"magicsock"} {

@@ -766,7 +768,7 @@ func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Requ
 	}
 	component := r.FormValue("component")
 	secs, _ := strconv.Atoi(r.FormValue("secs"))
-	err := h.b.SetComponentDebugLogging(component, time.Now().Add(time.Duration(secs)*time.Second))
+	err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second))
 	var res struct {
 		Error string
 	}

@@ -1887,7 +1889,7 @@ func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) {
 	// opting-out of rate limits. Limit ourselves to at most one message
 	// per 20ms and a burst of 60 log lines, which should be fast enough to
 	// not block for too long but slow enough that we can upload all lines.
-	logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, time.Now)
+	logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now)
 
 	for _, line := range logRequest.Lines {
 		logf("%s", line)