// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ipn

import (
	"context"
	"errors"
	"fmt"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/tailscale/wireguard-go/wgcfg"
	"inet.af/netaddr"
	"tailscale.com/control/controlclient"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/ipn/policy"
	"tailscale.com/portlist"
	"tailscale.com/tailcfg"
	"tailscale.com/types/empty"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/version"
	"tailscale.com/wgengine"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/router"
)

// LocalBackend is the scaffolding between the Tailscale cloud control
// plane and the local network stack, wiring up NetworkMap updates
// from the cloud to the local WireGuard engine.
type LocalBackend struct {
	ctx             context.Context    // valid until Close
	ctxCancel       context.CancelFunc // closes ctx
	logf            logger.Logf
	keyLogf         logger.Logf
	e               wgengine.Engine
	store           StateStore
	serverURL       string // tailcontrol URL
	backendLogID    string
	portpoll        *portlist.Poller // may be nil
	newDecompressor func() (controlclient.Decompressor, error)
	lastFilterPrint time.Time

	// The mutex protects the following elements.
	mu           sync.Mutex
	notify       func(Notify)
	c            *controlclient.Client // TODO: appears to be (inconsistently) guarded by mu
	stateKey     StateKey
	prefs        *Prefs
	state        State
	hiCache      *tailcfg.Hostinfo
	netMapCache  *controlclient.NetworkMap
	engineStatus EngineStatus
	endpoints    []string
	blocked      bool
	authURL      string
	interact     int

	// statusLock must be held before calling statusChanged.Lock() or
	// statusChanged.Broadcast().
	statusLock    sync.Mutex
	statusChanged *sync.Cond
}

// NewLocalBackend returns a new LocalBackend that is ready to run,
// but is not actually running.
func NewLocalBackend(logf logger.Logf, logid string, store StateStore, e wgengine.Engine) (*LocalBackend, error) {
	if e == nil {
		panic("ipn.NewLocalBackend: wgengine must not be nil")
	}

	// Default filter blocks everything, until Start() is called.
	e.SetFilter(filter.NewAllowNone(logf))

	ctx, cancel := context.WithCancel(context.Background())
	portpoll, err := portlist.NewPoller()
	if err != nil {
		logf("skipping portlist: %s", err)
	}

	b := &LocalBackend{
		ctx:          ctx,
		ctxCancel:    cancel,
		logf:         logf,
		keyLogf:      logger.LogOnChange(logf, 5*time.Minute, time.Now),
		e:            e,
		store:        store,
		backendLogID: logid,
		state:        NoState,
		portpoll:     portpoll,
	}
	b.statusChanged = sync.NewCond(&b.statusLock)

	if b.portpoll != nil {
		go b.portpoll.Run(ctx)
		go b.readPoller()
	}

	return b, nil
}
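
// Shutdown halts the backend and all its sub-components. The backend
// can no longer be used after Shutdown returns.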
func (b *LocalBackend) Shutdown() {
	b.ctxCancel()
	b.c.Shutdown()
	b.e.Close()
	b.e.Wait()
}

// Status returns the latest status of the Tailscale network from all the various components.
func (b *LocalBackend) Status() *ipnstate.Status {
	sb := new(ipnstate.StatusBuilder)
	b.UpdateStatus(sb)
	return sb.Status()
}
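
// UpdateStatus populates sb with the wireguard engine's status and
// the peer and user details from the cached network map.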
func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) {
	b.e.UpdateStatus(sb)

	b.mu.Lock()
	defer b.mu.Unlock()

	// TODO: hostinfo, and its networkinfo
	// TODO: EngineStatus copy (and deprecate it?)
	if b.netMapCache != nil {
		for id, up := range b.netMapCache.UserProfiles {
			sb.AddUser(id, up)
		}
		for _, p := range b.netMapCache.Peers {
			var lastSeen time.Time
			if p.LastSeen != nil {
				lastSeen = *p.LastSeen
			}
			var tailAddr string
			if len(p.Addresses) > 0 {
				tailAddr = strings.TrimSuffix(p.Addresses[0].String(), "/32")
			}
			sb.AddPeer(key.Public(p.Key), &ipnstate.PeerStatus{
				InNetworkMap: true,
				UserID:       p.User,
				TailAddr:     tailAddr,
				HostName:     p.Hostinfo.Hostname,
				OS:           p.Hostinfo.OS,
				KeepAlive:    p.KeepAlive,
				Created:      p.Created,
				LastSeen:     lastSeen,
			})
		}
	}

}

// SetDecompressor sets a decompression function, which must be a zstd
// reader.
//
// This exists because the iOS/Mac NetworkExtension is very resource
// constrained, and the zstd package is too heavy to fit in the
// constrained RSS limit.
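//
// A hypothetical caller sketch (assuming the
// github.com/klauspost/compress/zstd package, whose *Decoder type
// satisfies controlclient.Decompressor):
//
//	b.SetDecompressor(func() (controlclient.Decompressor, error) {
//		return zstd.NewReader(nil)
//	})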
func (b *LocalBackend) SetDecompressor(fn func() (controlclient.Decompressor, error)) {
	b.newDecompressor = fn
}
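
// Start applies the configuration specified in opts, creating a fresh
// controlclient as needed, and starts the state machine.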
func (b *LocalBackend) Start(opts Options) error {
	if opts.Prefs == nil && opts.StateKey == "" {
		return errors.New("no state key or prefs provided")
	}

	if opts.Prefs != nil {
		b.logf("Start: %v", opts.Prefs.Pretty())
	} else {
		b.logf("Start")
	}

	hi := controlclient.NewHostinfo()
	hi.BackendLogID = b.backendLogID
	hi.FrontendLogID = opts.FrontendLogID

	b.mu.Lock()

	if b.c != nil {
		// TODO(apenwarr): avoid the need to reinit controlclient.
		// This will trigger a full relogin/reconfigure cycle every
		// time a Handle reconnects to the backend. Ideally, we
		// would send the new Prefs and everything would get back
		// into sync with the minimal changes. But that's not how it
		// is right now, which is a sign that the code is still too
		// complicated.
		b.c.Shutdown()
	}

	if b.hiCache != nil {
		hi.Services = b.hiCache.Services // keep any previous session and netinfo
		hi.NetInfo = b.hiCache.NetInfo
	}
	b.hiCache = hi
	b.state = NoState

	if err := b.loadStateLocked(opts.StateKey, opts.Prefs, opts.LegacyConfigPath); err != nil {
		b.mu.Unlock()
		return fmt.Errorf("loading requested state: %v", err)
	}

	b.serverURL = b.prefs.ControlURL
	hi.RoutableIPs = append(hi.RoutableIPs, b.prefs.AdvertiseRoutes...)
	hi.RequestTags = append(hi.RequestTags, b.prefs.AdvertiseTags...)

	b.notify = opts.Notify
	b.netMapCache = nil
	persist := b.prefs.Persist
	wantDERP := !b.prefs.DisableDERP
	b.mu.Unlock()

	b.e.SetDERPEnabled(wantDERP)
	b.updateFilter(nil)

	var err error
	if persist == nil {
		// let controlclient initialize it
		persist = &controlclient.Persist{}
	}
	cli, err := controlclient.New(controlclient.Options{
		Logf:            logger.WithPrefix(b.logf, "control: "),
		Persist:         *persist,
		ServerURL:       b.serverURL,
		AuthKey:         opts.AuthKey,
		Hostinfo:        hi,
		KeepAlive:       true,
		NewDecompressor: b.newDecompressor,
	})
	if err != nil {
		return err
	}

	b.mu.Lock()
	b.c = cli
	endpoints := b.endpoints
	b.mu.Unlock()

	if endpoints != nil {
		cli.UpdateEndpoints(0, endpoints)
	}

	cli.SetStatusFunc(func(newSt controlclient.Status) {
		if newSt.LoginFinished != nil {
			// Auth completed, unblock the engine
			b.blockEngineUpdates(false)
			b.authReconfig()
			b.send(Notify{LoginFinished: &empty.Message{}})
		}
		if newSt.Persist != nil {
			persist := *newSt.Persist // copy

			b.mu.Lock()
			b.prefs.Persist = &persist
			prefs := b.prefs.Clone()
			stateKey := b.stateKey
			b.mu.Unlock()

			if stateKey != "" {
				if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
					b.logf("Failed to save new controlclient state: %v", err)
				}
			}
			b.send(Notify{Prefs: prefs})
		}
		if newSt.NetMap != nil {
			b.mu.Lock()
			if b.netMapCache != nil {
				diff := newSt.NetMap.ConciseDiffFrom(b.netMapCache)
				if strings.TrimSpace(diff) == "" {
					b.logf("netmap diff: (none)")
				} else {
					b.logf("netmap diff:\n%v", diff)
				}
			}
			b.netMapCache = newSt.NetMap
			b.mu.Unlock()

			b.send(Notify{NetMap: newSt.NetMap})
			b.updateFilter(newSt.NetMap)
		}
		if newSt.URL != "" {
			b.logf("Received auth URL: %.20v...", newSt.URL)

			b.mu.Lock()
			interact := b.interact
			b.authURL = newSt.URL
			b.mu.Unlock()

			if interact > 0 {
				b.popBrowserAuthNow()
			}
		}
		if newSt.Err != "" {
			// TODO(crawshaw): display in the UI.
			log.Print(newSt.Err)
			return
		}
		if newSt.NetMap != nil {
			b.mu.Lock()
			if b.state == NeedsLogin {
				b.prefs.WantRunning = true
			}
			prefs := b.prefs
			b.mu.Unlock()

			b.SetPrefs(prefs)
		}
		b.stateMachine()
	})

	b.e.SetStatusCallback(func(s *wgengine.Status, err error) {
		if err != nil {
			b.logf("wgengine status error: %#v", err)
			return
		}
		if s == nil {
			log.Fatalf("weird: non-error wgengine update with status=nil")
		}

		es := b.parseWgStatus(s)

		b.mu.Lock()
		c := b.c
		b.engineStatus = es
		b.endpoints = append([]string{}, s.LocalAddrs...)
		b.mu.Unlock()

		if c != nil {
			c.UpdateEndpoints(0, s.LocalAddrs)
		}
		b.stateMachine()

		b.statusLock.Lock()
		b.statusChanged.Broadcast()
		b.statusLock.Unlock()

		b.send(Notify{Engine: &es})
	})

	b.e.SetNetInfoCallback(b.SetNetInfo)

	b.mu.Lock()
	prefs := b.prefs.Clone()
	b.mu.Unlock()

	blid := b.backendLogID
	b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID)
	b.send(Notify{BackendLogID: &blid})
	b.send(Notify{Prefs: prefs})

	cli.Login(nil, controlclient.LoginDefault)
	return nil
}
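
// updateFilter sets the packet filter in wgengine based on netMap and
// the current prefs. A nil netMap, or ShieldsUp, blocks everything.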
func (b *LocalBackend) updateFilter(netMap *controlclient.NetworkMap) {
	// TODO(apenwarr): don't replace filter at all if unchanged.
	// TODO(apenwarr): print a diff instead of full filter.
	if netMap == nil {
		// Not configured yet, block everything
		b.logf("netmap packet filter: (not ready yet)")
		b.e.SetFilter(filter.NewAllowNone(b.logf))
	} else if b.Prefs().ShieldsUp {
		// Shields up, block everything
		b.logf("netmap packet filter: (shields up)")
		b.e.SetFilter(filter.NewAllowNone(b.logf))
	} else {
		now := time.Now()
		if now.Sub(b.lastFilterPrint) > 1*time.Minute {
			b.logf("netmap packet filter: %v", netMap.PacketFilter)
			b.lastFilterPrint = now
		} else {
			b.logf("netmap packet filter: (length %d)", len(netMap.PacketFilter))
		}
		b.e.SetFilter(filter.New(netMap.PacketFilter, b.e.GetFilter(), b.logf))
	}
}
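
// readPoller consumes service updates from the port poller, storing
// them in the backend's Hostinfo and sending them to control.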
func (b *LocalBackend) readPoller() {
	for {
		ports, ok := <-b.portpoll.C
		if !ok {
			return
		}
		sl := []tailcfg.Service{}
		for _, p := range ports {
			s := tailcfg.Service{
				Proto:       tailcfg.ServiceProto(p.Proto),
				Port:        p.Port,
				Description: p.Process,
			}
			if policy.IsInterestingService(s, version.OS()) {
				sl = append(sl, s)
			}
		}

		b.mu.Lock()
		if b.hiCache == nil {
			// TODO(bradfitz): it's a little weird that this port poller
			// is started (by NewLocalBackend) before the Start call.
			b.hiCache = new(tailcfg.Hostinfo)
		}
		b.hiCache.Services = sl
		hi := b.hiCache
		b.mu.Unlock()

		b.doSetHostinfoFilterServices(hi)
	}
}
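
// send delivers n to the connected frontend, if any, stamping it with
// the backend version.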
func (b *LocalBackend) send(n Notify) {
	b.mu.Lock()
	notify := b.notify
	b.mu.Unlock()

	if notify != nil {
		n.Version = version.LONG
		notify(n)
	}
}
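
// popBrowserAuthNow shuts down the data plane and asks the frontend
// to open the pending auth URL in a browser.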
func (b *LocalBackend) popBrowserAuthNow() {
	b.mu.Lock()
	url := b.authURL
	b.interact = 0
	b.authURL = ""
	b.mu.Unlock()

	b.logf("popBrowserAuthNow: url=%v", url != "")

	b.blockEngineUpdates(true)
	b.stopEngineAndWait()
	b.send(Notify{BrowseToURL: &url})
	if b.State() == Running {
		b.enterState(Starting)
	}
}

// loadStateLocked sets b.prefs and b.stateKey based on the
// combination of key, prefs, and legacyPath. b.mu must be held.
func (b *LocalBackend) loadStateLocked(key StateKey, prefs *Prefs, legacyPath string) error {
	if prefs == nil && key == "" {
		panic("state key and prefs are both unset")
	}

	if key == "" {
		// Frontend fully owns the state, we just need to obey it.
		b.logf("Using frontend prefs")
		b.prefs = prefs.Clone()
		b.stateKey = ""
		return nil
	}

	if prefs != nil {
		// Backend owns the state, but frontend is trying to migrate
		// state into the backend.
		b.logf("Importing frontend prefs into backend store")
		if err := b.store.WriteState(key, prefs.ToBytes()); err != nil {
			return fmt.Errorf("store.WriteState: %v", err)
		}
	}

	b.logf("Using backend prefs")
	bs, err := b.store.ReadState(key)
	if err != nil {
		if errors.Is(err, ErrStateNotExist) {
			if legacyPath != "" {
				b.prefs, err = LoadPrefs(legacyPath, true)
				if err != nil {
					b.logf("Failed to load legacy prefs: %v", err)
					b.prefs = NewPrefs()
				} else {
					b.logf("Imported state from relaynode for %q", key)
				}
			} else {
				b.prefs = NewPrefs()
				b.logf("Created empty state for %q", key)
			}
			b.stateKey = key
			return nil
		}
		return fmt.Errorf("store.ReadState(%q): %v", key, err)
	}
	b.prefs, err = PrefsFromBytes(bs, false)
	if err != nil {
		return fmt.Errorf("PrefsFromBytes: %v", err)
	}
	b.stateKey = key
	return nil
}

// State returns the backend's state.
func (b *LocalBackend) State() State {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.state
}

// EngineStatus returns the engine status. See also: Status, and State.
//
// TODO(bradfitz): deprecate this and merge it with the Status method
// that returns ipnstate.Status? Maybe have that take flags for what info
// the caller cares about?
func (b *LocalBackend) EngineStatus() EngineStatus {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.engineStatus
}
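
// StartLoginInteractive pops the browser to the pending auth URL, if
// one was already received, or else requests an interactive login
// from the control server.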
func (b *LocalBackend) StartLoginInteractive() {
	b.mu.Lock()
	b.assertClientLocked()
	b.interact++
	url := b.authURL
	c := b.c
	b.mu.Unlock()
	b.logf("StartLoginInteractive: url=%v", url != "")

	if url != "" {
		b.popBrowserAuthNow()
	} else {
		c.Login(nil, controlclient.LoginInteractive)
	}
}
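
// FakeExpireAfter moves the cached network map's expiry to be no
// later than x from now, then notifies the frontend of the change.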
func (b *LocalBackend) FakeExpireAfter(x time.Duration) {
	b.logf("FakeExpireAfter: %v", x)
	if b.netMapCache != nil {
		e := b.netMapCache.Expiry
		if e.IsZero() || time.Until(e) > x {
			b.netMapCache.Expiry = time.Now().Add(x)
		}
		b.send(Notify{NetMap: b.netMapCache})
	}
}
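
// LocalAddrs returns this node's Tailscale addresses from the cached
// network map, or nil if no netmap has been received yet.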
func (b *LocalBackend) LocalAddrs() []wgcfg.CIDR {
	if b.netMapCache != nil {
		return b.netMapCache.Addresses
	} else {
		return nil
	}
}
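
// Expiry returns the expiry time of the cached network map, or the
// zero time if no netmap has been received yet.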
func (b *LocalBackend) Expiry() time.Time {
	if b.netMapCache != nil {
		return b.netMapCache.Expiry
	} else {
		return time.Time{}
	}
}
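
// parseWgStatus summarizes s into an EngineStatus, logging the set of
// live peer keys as it goes.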
func (b *LocalBackend) parseWgStatus(s *wgengine.Status) EngineStatus {
	var ss []string
	var ps []string
	var rx, tx wgengine.ByteCount
	peers := make(map[tailcfg.NodeKey]wgengine.PeerStatus)

	live := 0
	for _, p := range s.Peers {
		if !p.LastHandshake.IsZero() {
			ss = append(ss, fmt.Sprintf("%d/%d", p.RxBytes, p.TxBytes))
			live++
			peers[p.NodeKey] = p

			ps = append(ps, p.NodeKey.ShortString())
		}
		rx += p.RxBytes
		tx += p.TxBytes
	}
	if len(ss) != 0 {
		b.keyLogf("peer keys: %s", strings.Join(ps, " "))
		b.logf("v%v peers: %v", version.LONG, strings.Join(ss, " "))
	}
	return EngineStatus{
		RBytes:    rx,
		WBytes:    tx,
		NumLive:   live,
		LiveDERPs: s.DERPs,
		LivePeers: peers,
	}
}
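
// AdminPageURL returns the machine admin page URL on the configured
// control server.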
func (b *LocalBackend) AdminPageURL() string {
	return b.serverURL + "/admin/machines"
}
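
// Prefs returns the backend's current preferences.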
func (b *LocalBackend) Prefs() *Prefs {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.prefs
}
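
// SetPrefs saves new as the current (and stored) preferences, then
// applies its side effects: hostinfo, packet filter, and engine state.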
func (b *LocalBackend) SetPrefs(new *Prefs) {
	if new == nil {
		panic("SetPrefs got nil prefs")
	}

	b.mu.Lock()
	old := b.prefs
	new.Persist = old.Persist // caller isn't allowed to override this
	b.prefs = new
	if b.stateKey != "" {
		if err := b.store.WriteState(b.stateKey, b.prefs.ToBytes()); err != nil {
			b.logf("Failed to save new controlclient state: %v", err)
		}
	}
	oldHi := b.hiCache
	newHi := oldHi.Clone()
	newHi.RoutableIPs = append([]wgcfg.CIDR(nil), b.prefs.AdvertiseRoutes...)
	b.hiCache = newHi
	b.mu.Unlock()

	b.logf("SetPrefs: %v", new.Pretty())

	if old.ShieldsUp != new.ShieldsUp || !oldHi.Equal(newHi) {
		b.doSetHostinfoFilterServices(newHi)
	}

	b.updateFilter(b.netMapCache)

	if old.WantRunning != new.WantRunning {
		b.stateMachine()
	} else {
		b.authReconfig()
	}

	b.send(Notify{Prefs: new})
}
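
// doSetHostinfoFilterServices sends hi to the control client, if one
// exists, first dropping the service list if ShieldsUp is set.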
func (b *LocalBackend) doSetHostinfoFilterServices(hi *tailcfg.Hostinfo) {
	hi2 := *hi
	prefs := b.Prefs()
	if prefs != nil && prefs.ShieldsUp {
		// No local services are available, since ShieldsUp will block
		// them all.
		hi2.Services = []tailcfg.Service{}
	}

	b.mu.Lock()
	cli := b.c
	b.mu.Unlock()

	// b.c might not be started yet
	if cli != nil {
		cli.SetHostinfo(&hi2)
	}
}

// NetMap returns the latest cached network map received from
// controlclient. Note: the return value may be nil, if we haven't
// received a netmap yet.
func (b *LocalBackend) NetMap() *controlclient.NetworkMap {
	return b.netMapCache
}
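
// blockEngineUpdates sets whether engine reconfiguration is blocked,
// e.g. while waiting for an interactive login to complete.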
func (b *LocalBackend) blockEngineUpdates(block bool) {
	// TODO(apenwarr): probably need mutex here (and several other places)
	b.logf("blockEngineUpdates(%v)", block)

	b.mu.Lock()
	b.blocked = block
	b.mu.Unlock()
}

// authReconfig pushes a new configuration into wgengine, based on the
// cached netmap and user prefs.
func (b *LocalBackend) authReconfig() {
	b.mu.Lock()
	blocked := b.blocked
	uc := b.prefs
	nm := b.netMapCache
	b.mu.Unlock()

	if blocked {
		b.logf("authReconfig: blocked, skipping.")
		return
	}
	if nm == nil {
		b.logf("authReconfig: netmap not yet valid. Skipping.")
		return
	}
	if !uc.WantRunning {
		b.logf("authReconfig: skipping because !WantRunning.")
		return
	}

	uflags := controlclient.UDefault
	if uc.RouteAll {
		uflags |= controlclient.UAllowDefaultRoute
		// TODO(apenwarr): Make subnet routes a different pref?
		uflags |= controlclient.UAllowSubnetRoutes
		// TODO(apenwarr): Remove this once we sort out subnet routes.
		// Right now default routes are broken in Windows, but
		// controlclient doesn't properly send subnet routes. So
		// let's convert a default route into a subnet route in order
		// to allow experimentation.
		uflags |= controlclient.UHackDefaultRoute
	}
	if uc.AllowSingleHosts {
		uflags |= controlclient.UAllowSingleHosts
	}

	dns := nm.DNS
	dom := nm.DNSDomains
	if !uc.CorpDNS {
		dns = []wgcfg.IP{}
		dom = []string{}
	}
	cfg, err := nm.WGCfg(uflags, dns)
	if err != nil {
		log.Fatalf("WGCfg: %v", err)
	}

	err = b.e.Reconfig(cfg, routerConfig(cfg, uc, dom))
	if err == wgengine.ErrNoChanges {
		return
	}
	b.logf("authReconfig: ra=%v dns=%v 0x%02x: %v", uc.RouteAll, uc.CorpDNS, uflags, err)
}

// routerConfig produces a router.Config from a wireguard config,
// IPN prefs, and the dnsDomains pulled from control's network map.
func routerConfig(cfg *wgcfg.Config, prefs *Prefs, dnsDomains []string) *router.Config {
	var addrs []wgcfg.CIDR
	for _, addr := range cfg.Addresses {
		addrs = append(addrs, wgcfg.CIDR{
			IP: addr.IP,
			// TODO(apenwarr): this shouldn't be hardcoded in the client
			// TODO(danderson): fairly sure we can make this a /32 or
			// /128 based on address family. Need to check behavior on
			// !linux OSes.
			Mask: 10,
		})
	}

	rs := &router.Config{
		LocalAddrs:       wgCIDRToNetaddr(addrs),
		DNS:              wgIPToNetaddr(cfg.DNS),
		DNSDomains:       dnsDomains,
		SubnetRoutes:     wgCIDRToNetaddr(prefs.AdvertiseRoutes),
		SNATSubnetRoutes: !prefs.NoSNAT,
		NetfilterMode:    prefs.NetfilterMode,
	}

	for _, peer := range cfg.Peers {
		rs.Routes = append(rs.Routes, wgCIDRToNetaddr(peer.AllowedIPs)...)
	}

	return rs
}
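
// wgIPToNetaddr converts a slice of wgcfg.IP to the equivalent
// netaddr.IP values, panicking if any conversion fails.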
func wgIPToNetaddr(ips []wgcfg.IP) (ret []netaddr.IP) {
	for _, ip := range ips {
		nip, ok := netaddr.FromStdIP(ip.IP())
		if !ok {
			panic(fmt.Sprintf("conversion of %s from wgcfg to netaddr IP failed", ip))
		}
		ret = append(ret, nip.Unmap())
	}
	return ret
}
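
// wgCIDRToNetaddr converts a slice of wgcfg.CIDR to the equivalent
// netaddr.IPPrefix values, panicking if any conversion fails.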
func wgCIDRToNetaddr(cidrs []wgcfg.CIDR) (ret []netaddr.IPPrefix) {
	for _, cidr := range cidrs {
		ncidr, ok := netaddr.FromStdIPNet(cidr.IPNet())
		if !ok {
			panic(fmt.Sprintf("conversion of %s from wgcfg to netaddr IPNet failed", cidr))
		}
		ncidr.IP = ncidr.IP.Unmap()
		ret = append(ret, ncidr)
	}
	return ret
}
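
// enterState transitions the backend into newState, reconfiguring the
// engine and notifying the frontend as needed.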
func (b *LocalBackend) enterState(newState State) {
	b.mu.Lock()
	state := b.state
	prefs := b.prefs
	notify := b.notify
	b.mu.Unlock()

	if state == newState {
		return
	}
	b.logf("Switching ipn state %v -> %v (WantRunning=%v)",
		state, newState, prefs.WantRunning)
	if notify != nil {
		b.send(Notify{State: &newState})
	}

	b.state = newState
	switch newState {
	case NeedsLogin:
		b.blockEngineUpdates(true)
		fallthrough
	case Stopped:
		err := b.e.Reconfig(&wgcfg.Config{}, nil)
		if err != nil {
			b.logf("Reconfig(down): %v", err)
		}
	case Starting, NeedsMachineAuth:
		b.authReconfig()
		// Needed so that UpdateEndpoints can run
		b.e.RequestStatus()
	case Running:
		break
	default:
		b.logf("[unexpected] unknown newState %#v", newState)
	}

}
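
// nextState computes the state the backend should be in, based on the
// control client's auth progress, the cached netmap, and prefs.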
func (b *LocalBackend) nextState() State {
	b.mu.Lock()
	b.assertClientLocked()
	var (
		c           = b.c
		netMap      = b.netMapCache
		state       = b.state
		wantRunning = b.prefs.WantRunning
	)
	b.mu.Unlock()

	if netMap == nil {
		if c.AuthCantContinue() {
			// Auth was interrupted or waiting for URL visit,
			// so it won't proceed without human help.
			return NeedsLogin
		} else {
			// Auth or map request needs to finish
			return state
		}
	} else if !wantRunning {
		return Stopped
	} else if e := netMap.Expiry; !e.IsZero() && time.Until(e) <= 0 {
		return NeedsLogin
	} else if netMap.MachineStatus != tailcfg.MachineAuthorized {
		// TODO(crawshaw): handle tailcfg.MachineInvalid
		return NeedsMachineAuth
	} else if state == NeedsMachineAuth {
		// (if we get here, we know MachineAuthorized == true)
		return Starting
	} else if state == Starting {
		if st := b.EngineStatus(); st.NumLive > 0 || st.LiveDERPs > 0 {
			return Running
		} else {
			return state
		}
	} else if state == Running {
		return Running
	} else {
		return Starting
	}
}
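
// RequestEngineStatus asks the wireguard engine for a status update.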
func (b *LocalBackend) RequestEngineStatus() {
	b.e.RequestStatus()
}
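
// RequestStatus sends the current full status to the frontend.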
func (b *LocalBackend) RequestStatus() {
	st := b.Status()
	b.notify(Notify{Status: st})
}

// TODO(apenwarr): use a channel or something to prevent re-entrancy?
// Or maybe just call the state machine from fewer places.
func (b *LocalBackend) stateMachine() {
	b.enterState(b.nextState())
}
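
// stopEngineAndWait deconfigures the local network data plane, and
// waits for it to deliver a status update before returning.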
func (b *LocalBackend) stopEngineAndWait() {
	b.logf("stopEngineAndWait...")
	b.e.Reconfig(&wgcfg.Config{}, nil)
	b.requestEngineStatusAndWait()
	b.logf("stopEngineAndWait: done.")
}

// requestEngineStatusAndWait requests the wgengine status, and does
// not return until the status has been delivered to the usual callback.
func (b *LocalBackend) requestEngineStatusAndWait() {
	b.logf("requestEngineStatusAndWait")

	b.statusLock.Lock()
	go b.e.RequestStatus()
	b.logf("requestEngineStatusAndWait: waiting...")
	b.statusChanged.Wait() // temporarily releases lock while waiting
	b.logf("requestEngineStatusAndWait: got status update.")
	b.statusLock.Unlock()
}

// NOTE(apenwarr): No easy way to persist logged-out status.
// Maybe that's for the better; if someone logs out accidentally,
// rebooting will fix it.
func (b *LocalBackend) Logout() {
	b.mu.Lock()
	b.assertClientLocked()
	c := b.c
	b.netMapCache = nil
	b.mu.Unlock()

	c.Logout()

	b.mu.Lock()
	b.netMapCache = nil
	b.mu.Unlock()

	b.stateMachine()
}
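
// assertClientLocked panics if b.c is nil. b.mu must be held.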
func (b *LocalBackend) assertClientLocked() {
	if b.c == nil {
		panic("LocalBackend.assertClient: b.c == nil")
	}
}
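
// SetNetInfo stores ni in the backend's Hostinfo and forwards it to
// the control client, if one exists.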
func (b *LocalBackend) SetNetInfo(ni *tailcfg.NetInfo) {
	b.mu.Lock()
	c := b.c
	if b.hiCache != nil {
		b.hiCache.NetInfo = ni.Clone()
	}
	b.mu.Unlock()

	if c == nil {
		return
	}
	c.SetNetInfo(ni)
}