cmd/tailscale/cli: prevent concurrent Start calls in 'up'

Seems to deflake tstest/integration tests. I can't reproduce it anymore
on one of my VMs that was consistently flaking after a dozen runs
before. Now I can run hundreds of times.

Updates #11649
Fixes #7036

Change-Id: I2f7d4ae97500d507bdd78af9e92cd1242e8e44b8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
commit 0fba9e7570
parent 26f9bbc02b
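The fix works by serializing mutable LocalAPI calls behind a mutex and by wrapping the interactive-login trigger in sync.OnceFunc so it can fire at most once. Below is a minimal, self-contained sketch of that pattern; fakeClient and its method bodies are placeholders for illustration, not the real tailscale.com/client/tailscale API.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// fakeClient stands in for the LocalAPI client. Its method names mirror the
// ones used by 'up', but the type itself is a placeholder for illustration.
type fakeClient struct{}

func (fakeClient) Start(ctx context.Context) error {
	time.Sleep(50 * time.Millisecond) // pretend Start takes a while
	fmt.Println("Start finished")
	return nil
}

func (fakeClient) StartLoginInteractive(ctx context.Context) error {
	fmt.Println("StartLoginInteractive ran")
	return nil
}

func main() {
	ctx := context.Background()
	var client fakeClient

	// localAPIMu serializes mutable backend calls, so the watcher goroutine
	// can't issue StartLoginInteractive while Start is still in flight.
	var localAPIMu sync.Mutex

	// sync.OnceFunc (Go 1.21+) makes the login trigger run at most once,
	// no matter how many state notifications ask for it.
	startLoginInteractive := sync.OnceFunc(func() {
		localAPIMu.Lock()
		defer localAPIMu.Unlock()
		client.StartLoginInteractive(ctx)
	})

	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // stands in for the watcher goroutine
		defer wg.Done()
		startLoginInteractive()
		startLoginInteractive() // no-op: OnceFunc already ran
	}()

	// Stands in for the main goroutine's Start call, guarded by the same mutex.
	localAPIMu.Lock()
	client.Start(ctx)
	localAPIMu.Unlock()

	wg.Wait()
}

Whichever goroutine wins the mutex first, the two calls never overlap, which is the ordering the diff below enforces in runUp.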
@@ -496,11 +496,23 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE
 	running := make(chan bool, 1) // gets value once in state ipn.Running
 	pumpErr := make(chan error, 1)
 
-	var printed bool // whether we've yet printed anything to stdout or stderr
-	var loginOnce sync.Once
-	startLoginInteractive := func() { loginOnce.Do(func() { localClient.StartLoginInteractive(ctx) }) }
+	// localAPIMu should be held while doing mutable LocalAPI calls
+	// to the backend. In particular, it prevents StartLoginInteractive from
+	// being called from the watcher goroutine while the Start call from
+	// the other goroutine is in progress.
+	// See https://github.com/tailscale/tailscale/issues/7036#issuecomment-2053771466
+	// TODO(bradfitz): simplify this once #11649 is cleaned up and Start is
+	// hopefully removed.
+	var localAPIMu sync.Mutex
+
+	startLoginInteractive := sync.OnceFunc(func() {
+		localAPIMu.Lock()
+		defer localAPIMu.Unlock()
+		localClient.StartLoginInteractive(ctx)
+	})
 
 	go func() {
+		var printed bool // whether we've yet printed anything to stdout or stderr
 		for {
 			n, err := watcher.Next()
 			if err != nil {
@@ -574,12 +586,14 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE
 	// Special case: bare "tailscale up" means to just start
 	// running, if there's ever been a login.
 	if simpleUp {
+		localAPIMu.Lock()
 		_, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{
 			Prefs: ipn.Prefs{
 				WantRunning: true,
 			},
 			WantRunningSet: true,
 		})
+		localAPIMu.Unlock()
 		if err != nil {
 			return err
 		}
@@ -596,10 +610,13 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE
 	if err != nil {
 		return err
 	}
-	if err := localClient.Start(ctx, ipn.Options{
+	localAPIMu.Lock()
+	err = localClient.Start(ctx, ipn.Options{
 		AuthKey:     authKey,
 		UpdatePrefs: prefs,
-	}); err != nil {
+	})
+	localAPIMu.Unlock()
+	if err != nil {
 		return err
 	}
 	if upArgs.forceReauth {
@@ -345,7 +345,6 @@ func TestConfigFileAuthKey(t *testing.T) {
 
 func TestTwoNodes(t *testing.T) {
 	tstest.Shard(t)
-	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/3598")
 	tstest.Parallel(t)
 	env := newTestEnv(t)
 
@@ -400,7 +399,6 @@ func TestTwoNodes(t *testing.T) {
 // PeersRemoved set) saying that the second node disappeared.
 func TestIncrementalMapUpdatePeersRemoved(t *testing.T) {
 	tstest.Shard(t)
-	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/3598")
 	tstest.Parallel(t)
 	env := newTestEnv(t)
 
@@ -658,17 +656,14 @@ func TestNoControlConnWhenDown(t *testing.T) {
 	d2 := n1.StartDaemon()
 	n1.AwaitResponding()
 
-	st := n1.MustStatus()
-	if got, want := st.BackendState, "Stopped"; got != want {
-		t.Fatalf("after restart, state = %q; want %q", got, want)
-	}
+	n1.AwaitBackendState("Stopped")
 
 	ip2 := n1.AwaitIP4()
 	if ip1 != ip2 {
 		t.Errorf("IPs different: %q vs %q", ip1, ip2)
 	}
 
-	// The real test: verify our daemon doesn't have an HTTP request open.:
+	// The real test: verify our daemon doesn't have an HTTP request open.
 	if n := env.Control.InServeMap(); n != 0 {
 		t.Errorf("in serve map = %d; want 0", n)
 	}
@@ -1007,7 +1002,6 @@ func newTestEnv(t testing.TB, opts ...testEnvOpt) *testEnv {
 	if runtime.GOOS == "windows" {
 		t.Skip("not tested/working on Windows yet")
 	}
-	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7036")
 	derpMap := RunDERPAndSTUN(t, logger.Discard, "127.0.0.1")
 	logc := new(LogCatcher)
 	control := &testcontrol.Server{
@@ -1389,6 +1383,10 @@ func (n *testNode) AwaitIP6() netip.Addr {
 
 // AwaitRunning waits for n to reach the IPN state "Running".
 func (n *testNode) AwaitRunning() {
+	n.AwaitBackendState("Running")
+}
+
+func (n *testNode) AwaitBackendState(state string) {
 	t := n.env.t
 	t.Helper()
 	if err := tstest.WaitFor(20*time.Second, func() error {
@@ -1396,8 +1394,8 @@ func (n *testNode) AwaitRunning() {
 		if err != nil {
 			return err
 		}
-		if st.BackendState != "Running" {
-			return fmt.Errorf("in state %q", st.BackendState)
+		if st.BackendState != state {
+			return fmt.Errorf("in state %q; want %q", st.BackendState, state)
 		}
 		return nil
 	}); err != nil {
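The AwaitRunning/AwaitBackendState change above generalizes the test helper from waiting only for "Running" to waiting for an arbitrary backend state, which is what lets TestNoControlConnWhenDown replace its hand-rolled status check with n1.AwaitBackendState("Stopped"). A rough standard-library-only sketch of that polling shape follows; awaitState, getState, and the timings are illustrative names, not the integration test's API.

package main

import (
	"fmt"
	"time"
)

// awaitState polls getState until it reports want or the timeout elapses,
// mirroring the shape of testNode.AwaitBackendState (names here are made up).
func awaitState(getState func() (string, error), want string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		got, err := getState()
		if err == nil && got == want {
			return nil
		}
		if time.Now().After(deadline) {
			if err != nil {
				return err
			}
			return fmt.Errorf("in state %q; want %q", got, want)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	// Fake backend that reaches "Stopped" after a few polls.
	polls := 0
	getState := func() (string, error) {
		polls++
		if polls < 3 {
			return "Starting", nil
		}
		return "Stopped", nil
	}
	if err := awaitState(getState, "Stopped", 2*time.Second); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("reached Stopped after %d polls\n", polls)
}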
@@ -626,7 +626,7 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri
 }
 
 func TestTwoDevicePing(t *testing.T) {
-	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/1277")
+	flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762")
 	l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1)
 	n := &devices{
 		m1: l,