Mirror of https://github.com/tailscale/tailscale.git
wgengine/wglog: cache strings
We repeat many peers each time we call SetPeers. Instead of constructing
strings for them from scratch every time, keep strings alive across iterations.

name        old time/op    new time/op    delta
SetPeers-8    3.58µs ± 1%    2.41µs ± 1%  -32.60%  (p=0.000 n=9+10)

name        old alloc/op   new alloc/op   delta
SetPeers-8    2.53kB ± 0%    1.30kB ± 0%  -48.73%  (p=0.000 n=10+10)

name        old allocs/op  new allocs/op  delta
SetPeers-8      99.0 ± 0%      16.0 ± 0%  -83.84%  (p=0.000 n=10+10)

We could reduce alloc/op 12% and allocs/op 23% if strs had type
map[string]strCache instead of map[string]*strCache, but that wipes out
the execution time impact. Given that re-use is the most common scenario,
let's optimize for it.

Signed-off-by: Josh Bleecher Snyder <josh@tailscale.com>
commit ceaaa23962
parent c065cc6169
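The change boils down to a small mark-and-sweep string cache: look up each peer's cached strings, format them only on a cache miss, mark every entry touched this round, and sweep away entries whose peers disappeared. The following is a minimal, self-contained Go sketch of that pattern; the entry type, cache map, build function, and key formatting are illustrative stand-ins, not the strCache, Logger.strs, and SetPeers code shown in the diff below.

package main

import "fmt"

// entry caches the two formatted forms of a key so they are not rebuilt
// on every call. (Illustrative stand-in for the diff's strCache type.)
type entry struct {
	long, short string
	used        bool // set while building a round, cleared afterwards
}

// cache keeps formatted strings alive across calls to build.
var cache = map[int]*entry{}

// build returns a rewrite map for keys, reusing cached strings where
// possible and evicting entries for keys that no longer appear.
func build(keys []int) map[string]string {
	replace := make(map[string]string)
	for _, k := range keys {
		e, ok := cache[k]
		if !ok {
			// Only pay the formatting cost when the key is new.
			e = &entry{
				long:  fmt.Sprintf("key(%08d)", k),
				short: fmt.Sprintf("[%04d]", k),
			}
			cache[k] = e
		}
		e.used = true
		replace[e.long] = e.short
	}
	// Sweep: drop entries that were not used this round.
	for k, e := range cache {
		if !e.used {
			delete(cache, k)
			continue
		}
		e.used = false // reset for the next round
	}
	return replace
}

func main() {
	fmt.Println(build([]int{1, 2, 3}))
	fmt.Println(build([]int{2, 3, 4})) // 2 and 3 reuse cached strings; 1 is evicted
}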
@@ -9,6 +9,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"strings"
+	"sync"
 	"sync/atomic"
 
 	"github.com/tailscale/wireguard-go/device"
@@ -22,6 +23,14 @@ import (
 type Logger struct {
 	DeviceLogger *device.Logger
 	replace      atomic.Value // of map[string]string
+	mu           sync.Mutex              // protects strs
+	strs         map[wgkey.Key]*strCache // cached strs used to populate replace
+}
+
+// strCache holds a wireguard-go and a Tailscale style peer string.
+type strCache struct {
+	wg, ts string
+	used   bool // track whether this strCache was used in a particular round
 }
 
 // NewLogger creates a new logger for use with wireguard-go.
@@ -76,18 +85,36 @@ func NewLogger(logf logger.Logf) *Logger {
 		Verbosef: logger.WithPrefix(wrapper, "[v2] "),
 		Errorf:   wrapper,
 	}
+	ret.strs = make(map[wgkey.Key]*strCache)
 	return ret
 }
 
 // SetPeers adjusts x to rewrite the peer public keys found in peers.
 // SetPeers is safe for concurrent use.
 func (x *Logger) SetPeers(peers []wgcfg.Peer) {
+	x.mu.Lock()
+	defer x.mu.Unlock()
 	// Construct a new peer public key log rewriter.
 	replace := make(map[string]string)
 	for _, peer := range peers {
-		old := wireguardGoString(peer.PublicKey)
-		new := peer.PublicKey.ShortString()
-		replace[old] = new
+		c, ok := x.strs[peer.PublicKey] // look up cached strs
+		if !ok {
+			wg := wireguardGoString(peer.PublicKey)
+			ts := peer.PublicKey.ShortString()
+			c = &strCache{wg: wg, ts: ts}
+			x.strs[peer.PublicKey] = c
+		}
+		c.used = true
+		replace[c.wg] = c.ts
+	}
+	// Remove any unused cached strs.
+	for k, c := range x.strs {
+		if !c.used {
+			delete(x.strs, k)
+			continue
+		}
+		// Mark c as unused for next round.
+		c.used = false
 	}
 	x.replace.Store(replace)
 }
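The numbers quoted in the commit message are in benchstat format: run the same benchmark against the old and new code with go test, then compare the two result files with benchstat. Below is a hedged sketch of that workflow, reusing the illustrative build function and package-level cache from the sketch above (assumed to be in the same package); it is not the actual SetPeers benchmark from the repository.

// main_test.go (hypothetical), alongside the sketch above.
package main

import "testing"

// BenchmarkBuild measures the cost of rebuilding the rewrite map for a
// fixed key set, the same shape of measurement as the SetPeers-8 numbers
// above (the -8 suffix is GOMAXPROCS). Because the package-level cache
// persists across iterations, later iterations exercise the re-use path
// that the commit optimizes for.
func BenchmarkBuild(b *testing.B) {
	keys := make([]int, 50)
	for i := range keys {
		keys[i] = i
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		build(keys)
	}
}

Running go test -bench=Build -count=10 on each version of the code, saving the outputs to old.txt and new.txt, and then running benchstat old.txt new.txt produces tables in the same format as the ones quoted in the commit message.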