Mirror of https://github.com/tailscale/tailscale.git (synced 2025-01-05 23:07:44 +00:00)
ipn/ipnlocal,control/controlclient: make Logout more sync
We already removed the async API; make it more synchronous and remove the FinishLogout state too. This also makes the callback synchronous again, as the previous attempt was working around the logout callback causing a client shutdown to block forever.

Updates #3833

Signed-off-by: Maisem Ali <maisem@tailscale.com>
This commit is contained in:
parent 1f12b3aedc
commit 959362a1f4
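The practical effect for callers is that LogoutSync now finishes the whole logout before returning. A minimal caller-side sketch under stated assumptions: the logoutBackend interface and stubBackend are hypothetical stand-ins; only the LogoutSync method name comes from this commit.

package main

import (
	"context"
	"log"
	"time"
)

// logoutBackend stands in for the slice of ipnlocal.LocalBackend used
// here; the interface and stub are hypothetical, LogoutSync is real.
type logoutBackend interface {
	LogoutSync(ctx context.Context) error
}

type stubBackend struct{}

func (stubBackend) LogoutSync(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend control-server ack
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Bound the whole operation: the logout now blocks until the control
	// server acknowledges it (or ctx expires) instead of signaling
	// completion through a later FinishLogout state.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var b logoutBackend = stubBackend{}
	if err := b.LogoutSync(ctx); err != nil {
		log.Fatalf("logout: %v", err)
	}
	// A nil error now means the logout round-trip completed and the
	// profile cleanup below has already run; there is nothing to poll.
	log.Println("logged out")
}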
@@ -93,12 +93,6 @@ type Status struct {
 // TODO(bradfitz): delete this and everything around Status.state.
 func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated }

-// LogoutFinished reports whether the controlclient is in its "StateNotAuthenticated"
-// state where we don't want to be logged in.
-//
-// TODO(bradfitz): delete this and everything around Status.state.
-func (s *Status) LogoutFinished() bool { return s.state == StateNotAuthenticated }
-
 // StateForTest returns the internal state of s for tests only.
 func (s *Status) StateForTest() State { return s.state }

@@ -984,25 +984,6 @@ func (b *LocalBackend) SetControlClientStatus(st controlclient.Status) {

 	// Lock b once and do only the things that require locking.
 	b.mu.Lock()
-	inShutdown := b.shutdownCalled
-
-	if st.LogoutFinished() {
-		if p := b.pm.CurrentPrefs(); !p.Persist().Valid() || p.Persist().UserProfile().LoginName() == "" {
-			b.mu.Unlock()
-			return
-		}
-		if err := b.pm.DeleteProfile(b.pm.CurrentProfile().ID); err != nil {
-			b.logf("error deleting profile: %v", err)
-		}
-		if inShutdown {
-			b.mu.Unlock()
-			return
-		}
-		if err := b.resetForProfileChangeLockedOnEntry(); err != nil {
-			b.logf("resetForProfileChangeLockedOnEntry err: %v", err)
-		}
-		return
-	}

 	prefsChanged := false
 	prefs := b.pm.CurrentPrefs().AsStruct()
@@ -3729,10 +3710,16 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State) {

 }

 // hasNodeKey reports whether a non-zero node key is present in the current
 // prefs.
 func (b *LocalBackend) hasNodeKey() bool {
-	// we can't use b.Prefs(), because it strips the keys, oops!
 	b.mu.Lock()
 	defer b.mu.Unlock()
+	return b.hasNodeKeyLocked()
+}
+
+func (b *LocalBackend) hasNodeKeyLocked() bool {
+	// we can't use b.Prefs(), because it strips the keys, oops!
 	p := b.pm.CurrentPrefs()
 	return p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero()
 }
@@ -3741,13 +3728,10 @@ func (b *LocalBackend) hasNodeKey() bool {
 func (b *LocalBackend) NodeKey() key.NodePublic {
 	b.mu.Lock()
 	defer b.mu.Unlock()

-	p := b.pm.CurrentPrefs()
-	if !p.Valid() || !p.Persist().Valid() || p.Persist().PrivateNodeKey().IsZero() {
+	if !b.hasNodeKeyLocked() {
 		return key.NodePublic{}
 	}

-	return p.Persist().PublicNodeKey()
+	return b.pm.CurrentPrefs().Persist().PublicNodeKey()
 }

 // nextState returns the state the backend seems to be in, based on
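The two hunks above split hasNodeKey into a locking wrapper plus a *Locked variant that NodeKey can call while already holding b.mu. A generic sketch of that convention, using a toy counter type that is not from the commit:

package sketch

import "sync"

// counter illustrates the method/methodLocked convention applied to
// hasNodeKey above: the exported method takes the mutex and delegates
// to a *Locked variant that assumes the mutex is held, so methods that
// already hold the lock (like the new NodeKey) can reuse the logic
// without deadlocking on a second Lock call (Go mutexes are not
// re-entrant).
type counter struct {
	mu sync.Mutex
	n  int
}

func (c *counter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.valueLocked()
}

// valueLocked reports n. c.mu must be held.
func (c *counter) valueLocked() int { return c.n }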
@@ -3884,19 +3868,7 @@ func (b *LocalBackend) resetControlClientLockedAsync() {
 		// will abort.
 		b.numClientStatusCalls.Add(1)
 	}

-	if testenv.InTest() {
-		// From 2021-04-20 to 2023-08-31 we always did this shutdown
-		// asynchronously. But tests flaked on it sometimes, as our
-		// tests often do resource leak checks at the end to make sure
-		// everything's shut down. So do this synchronously in tests
-		// to deflake tests.
-		//
-		// TODO(bradfitz,maisem): do this always?
-		b.cc.Shutdown()
-	} else {
-		go b.cc.Shutdown()
-	}
+	b.cc.Shutdown()
 	b.cc = nil
 	b.ccAuto = nil
 }
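The deleted comment explains why the in-test special case existed: an asynchronous cc.Shutdown goroutine could still be running when a test's end-of-run resource checks fired. A sketch of that failure mode, with an illustrative helper that is not tailscale's actual leak checker:

package sketch

import (
	"runtime"
	"testing"
	"time"
)

// waitNoExtraGoroutines is a stand-in for the resource leak checks the
// deleted comment refers to. If cc.Shutdown ran in a goroutine, a check
// like this at test end could observe it still running and fail
// intermittently; shutting down synchronously removes that race.
func waitNoExtraGoroutines(t *testing.T, baseline int) {
	t.Helper()
	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		if runtime.NumGoroutine() <= baseline {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Errorf("%d goroutines still running (baseline %d)", runtime.NumGoroutine(), baseline)
}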
@@ -3936,14 +3908,26 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool {

 func (b *LocalBackend) LogoutSync(ctx context.Context) error {
 	b.mu.Lock()
+	if !b.hasNodeKeyLocked() {
+		// Already logged out.
+		b.mu.Unlock()
+		return nil
+	}
 	cc := b.cc
+
+	// Grab the current profile before we unlock the mutex, so that we can
+	// delete it later.
+	profile := b.pm.CurrentProfile()
 	b.mu.Unlock()

-	b.EditPrefs(&ipn.MaskedPrefs{
+	_, err := b.EditPrefs(&ipn.MaskedPrefs{
 		WantRunningSet: true,
 		LoggedOutSet:   true,
 		Prefs:          ipn.Prefs{WantRunning: false, LoggedOut: true},
 	})
+	if err != nil {
+		return err
+	}

 	// Clear any previous dial plan(s), if set.
 	b.dialPlan.Store(nil)
@@ -3959,9 +3943,16 @@ func (b *LocalBackend) LogoutSync(ctx context.Context) error {
 		return errors.New("no controlclient")
 	}

-	err := cc.Logout(ctx)
-	b.stateMachine()
-	return err
+	if err := cc.Logout(ctx); err != nil {
+		return err
+	}
+	b.mu.Lock()
+	if err := b.pm.DeleteProfile(profile.ID); err != nil {
+		b.mu.Unlock()
+		b.logf("error deleting profile: %v", err)
+		return err
+	}
+	return b.resetForProfileChangeLockedOnEntry()
 }

 // assertClientLocked crashes if there is no controlclient in this backend.
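Across these two hunks, the new LogoutSync snapshots the profile while holding b.mu, releases the lock across the blocking cc.Logout round-trip, then re-acquires it to delete the profile and reset. A distilled sketch of that locking pattern; all names here are illustrative, not the real LocalBackend API:

package sketch

import (
	"context"
	"sync"
)

// backend distills the locking pattern in the new LogoutSync: snapshot
// state under the mutex, release it across the blocking network call so
// the rest of the backend stays responsive, then re-acquire it to
// mutate state afterwards.
type backend struct {
	mu      sync.Mutex
	profile string
}

func (b *backend) logout(ctx context.Context, netLogout func(context.Context) error) error {
	b.mu.Lock()
	profile := b.profile // snapshot while the lock is held
	b.mu.Unlock()

	// Blocking round-trip to the control server; holding b.mu here
	// would stall every other backend operation until control replied.
	if err := netLogout(ctx); err != nil {
		return err
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	_ = profile    // stand-in for DeleteProfile(profile)
	b.profile = "" // stand-in for resetting to a fresh profile
	return nil
}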
@@ -439,6 +439,7 @@ func (pm *profileManager) NewProfile() {
 // defaultPrefs is the default prefs for a new profile.
 var defaultPrefs = func() ipn.PrefsView {
 	prefs := ipn.NewPrefs()
+	prefs.LoggedOut = true
 	prefs.WantRunning = false

 	prefs.ControlURL = winutil.GetPolicyString("LoginURL", "")
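This one-line default change is what flips the fresh-profile assertions from qt.IsFalse to qt.IsTrue in the test hunks that follow. A toy model of the before/after, not the real ipn.Prefs type:

package main

import "fmt"

// prefs is a toy model of the defaultPrefs change: new profiles now
// start logged out, so a fresh system reports LoggedOut()==true.
type prefs struct {
	LoggedOut   bool
	WantRunning bool
}

func newDefaultPrefs() prefs {
	return prefs{LoggedOut: true, WantRunning: false} // LoggedOut was false before this commit
}

func main() {
	fmt.Printf("%+v\n", newDefaultPrefs()) // {LoggedOut:true WantRunning:false}
}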
@@ -353,7 +353,7 @@ func TestStateMachine(t *testing.T) {
 		// Note: a totally fresh system has Prefs.LoggedOut=false by
 		// default. We are logged out, but not because the user asked
 		// for it, so it doesn't count as Prefs.LoggedOut==true.
-		c.Assert(prefs.LoggedOut(), qt.IsFalse)
+		c.Assert(prefs.LoggedOut(), qt.IsTrue)
 		c.Assert(prefs.WantRunning(), qt.IsFalse)
 		c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
 		c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
@@ -374,7 +374,7 @@ func TestStateMachine(t *testing.T) {
 		cc.assertCalls()
 		c.Assert(nn[0].Prefs, qt.IsNotNil)
 		c.Assert(nn[1].State, qt.IsNotNil)
-		c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse)
+		c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue)
 		c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse)
 		c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State)
 		c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
@@ -412,7 +412,7 @@ func TestStateMachine(t *testing.T) {
 	nn := notifies.drain(1)

 	c.Assert(nn[0].Prefs, qt.IsNotNil)
-	c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse)
+	c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue)
 	c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse)
 	c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
 }
@@ -579,58 +579,38 @@ func TestStateMachine(t *testing.T) {
 	// User wants to logout.
 	store.awaitWrite()
 	t.Logf("\n\nLogout")
-	notifies.expect(2)
+	notifies.expect(5)
 	b.LogoutSync(context.Background())
 	{
-		nn := notifies.drain(2)
-		cc.assertCalls("pause", "Logout")
+		nn := notifies.drain(5)
+		previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown")
+		c.Assert(nn[0].State, qt.IsNotNil)
+		c.Assert(*nn[0].State, qt.Equals, ipn.Stopped)
+
 		c.Assert(nn[1].Prefs, qt.IsNotNil)
-		c.Assert(ipn.Stopped, qt.Equals, *nn[0].State)
 		c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue)
 		c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse)
-		c.Assert(ipn.Stopped, qt.Equals, b.State())
+
+		cc.assertCalls("New")
+		c.Assert(nn[2].State, qt.IsNotNil)
+		c.Assert(*nn[2].State, qt.Equals, ipn.NoState)
+
+		c.Assert(nn[3].Prefs, qt.IsNotNil) // emptyPrefs
+		c.Assert(nn[3].Prefs.LoggedOut(), qt.IsTrue)
+		c.Assert(nn[3].Prefs.WantRunning(), qt.IsFalse)
+
+		c.Assert(nn[4].State, qt.IsNotNil)
+		c.Assert(*nn[4].State, qt.Equals, ipn.NeedsLogin)
+
+		c.Assert(b.State(), qt.Equals, ipn.NeedsLogin)
+
 		c.Assert(store.sawWrite(), qt.IsTrue)
 	}

-	// Let's make the logout succeed.
-	t.Logf("\n\nLogout - succeed")
-	notifies.expect(3)
-	cc.send(nil, "", false, nil)
-	{
-		previousCC.assertShutdown(true)
-		nn := notifies.drain(3)
-		cc.assertCalls("New")
-		c.Assert(nn[0].State, qt.IsNotNil)
-		c.Assert(*nn[0].State, qt.Equals, ipn.NoState)
-		c.Assert(nn[1].Prefs, qt.IsNotNil) // emptyPrefs
-		c.Assert(nn[2].State, qt.IsNotNil)
-		c.Assert(*nn[2].State, qt.Equals, ipn.NeedsLogin)
-		c.Assert(b.Prefs().LoggedOut(), qt.IsFalse)
-		c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
-		c.Assert(b.State(), qt.Equals, ipn.NeedsLogin)
-	}
-
-	// A second logout should reset all prefs.
+	// A second logout should be a no-op as we are in the NeedsLogin state.
 	t.Logf("\n\nLogout2")
-	notifies.expect(1)
-	b.LogoutSync(context.Background())
-	{
-		nn := notifies.drain(1)
-		c.Assert(nn[0].Prefs, qt.IsNotNil) // emptyPrefs
-		// BUG: the backend has already called StartLogout, and we're
-		// still logged out. So it shouldn't call it again.
-		cc.assertCalls("Logout")
+		cc.assertCalls()
 		c.Assert(b.Prefs().LoggedOut(), qt.IsTrue)
 		c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
 		c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
 	}

-	// Let's acknowledge the second logout too.
-	t.Logf("\n\nLogout2 (async) - succeed")
 	notifies.expect(0)
-	cc.send(nil, "", false, nil)
+	b.LogoutSync(context.Background())
 	{
 		notifies.drain(0)
 		cc.assertCalls()
@@ -639,24 +619,11 @@ func TestStateMachine(t *testing.T) {
 		c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
 	}

-	// Try the synchronous logout feature.
-	t.Logf("\n\nLogout (sync)")
-	notifies.expect(0)
+	// A third logout should also be a no-op as the cc should be in
+	// AuthCantContinue state.
+	t.Logf("\n\nLogout3")
+	notifies.expect(3)
 	b.LogoutSync(context.Background())
-	// NOTE: This returns as soon as cc.Logout() returns, which is okay
-	// I guess, since that's supposed to be synchronous.
 	{
 		notifies.drain(0)
-		cc.assertCalls("Logout")
 		c.Assert(b.Prefs().LoggedOut(), qt.IsTrue)
 		c.Assert(b.Prefs().WantRunning(), qt.IsFalse)
 		c.Assert(ipn.NeedsLogin, qt.Equals, b.State())
 	}

-	// Generate the third logout event.
-	t.Logf("\n\nLogout3 (sync) - succeed")
-	notifies.expect(0)
-	cc.send(nil, "", false, nil)
-	{
-		notifies.drain(0)
-		cc.assertCalls()
@@ -945,9 +945,11 @@ func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon {

 func (n *testNode) MustUp(extraArgs ...string) {
 	t := n.env.t
+	t.Helper()
 	args := []string{
 		"up",
 		"--login-server=" + n.env.ControlServer.URL,
+		"--reset",
 	}
 	args = append(args, extraArgs...)
 	cmd := n.Tailscale(args...)