Mirror of https://github.com/tailscale/tailscale.git (synced 2024-11-30)
Commit c56e94af2d

Rather than make each ipn.StateStore implementation guard against useless writes (a write of the same value that's already in the store), do writes via a new wrapper that has a fast path for the unchanged case. This then fixes profileManager's flood of useless writes to AWS SSM, etc.

Updates #8785

Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
49 lines · 954 B · Go
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipn

import (
	"bytes"
	"sync"
	"testing"

	"tailscale.com/util/mak"
)

// memStore is an in-memory StateStore that counts how many times its
// WriteState method is called, so the test can verify that redundant
// writes are skipped.
type memStore struct {
	mu     sync.Mutex
	writes int // number of WriteState calls
	m      map[StateKey][]byte
}

func (s *memStore) ReadState(k StateKey) ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return bytes.Clone(s.m[k]), nil
}

func (s *memStore) WriteState(k StateKey, v []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	mak.Set(&s.m, k, bytes.Clone(v)) // mak.Set allocates s.m on first use
	s.writes++
	return nil
}

func TestWriteState(t *testing.T) {
	var ss StateStore = new(memStore)

	// Write the same value twice; only the first call should reach
	// the underlying store's WriteState method.
	WriteState(ss, "foo", []byte("bar"))
	WriteState(ss, "foo", []byte("bar"))

	got, err := ss.ReadState("foo")
	if err != nil {
		t.Fatal(err)
	}
	if want := []byte("bar"); !bytes.Equal(got, want) {
		t.Errorf("got %q; want %q", got, want)
	}
	if got, want := ss.(*memStore).writes, 1; got != want {
		t.Errorf("got %d writes; want %d", got, want)
	}
}
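The test exercises the package-level WriteState wrapper described in the commit message: two identical writes reach the underlying store only once. A minimal sketch of such a wrapper, assuming it simply reads the current value and compares it before delegating (the names StateStore, StateKey, ReadState, and WriteState come from the ipn package above; the real wrapper may handle read errors and missing keys differently), might look like:

// WriteState writes v to store under key id, skipping the underlying
// write when the store already holds an identical value.
// Sketch only; not the verbatim tailscale.com implementation.
func WriteState(store StateStore, id StateKey, v []byte) error {
	if cur, err := store.ReadState(id); err == nil && bytes.Equal(cur, v) {
		return nil // fast path: value unchanged, no write needed
	}
	return store.WriteState(id, v)
}

With this shape, backends such as AWS SSM only see a write when the value actually changes, which is exactly what the writes == 1 assertion in TestWriteState checks.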