// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package controlclient

import (
	"bytes"
	"context"
	"encoding/binary"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"go4.org/mem"
	"golang.org/x/sync/singleflight"
	"inet.af/netaddr"
	"tailscale.com/control/controlknobs"
	"tailscale.com/envknob"
	"tailscale.com/health"
	"tailscale.com/hostinfo"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/log/logheap"
	"tailscale.com/logtail"
	"tailscale.com/net/dnscache"
	"tailscale.com/net/dnsfallback"
	"tailscale.com/net/interfaces"
	"tailscale.com/net/netutil"
	"tailscale.com/net/tlsdial"
	"tailscale.com/net/tsdial"
	"tailscale.com/net/tshttpproxy"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/netmap"
	"tailscale.com/types/opt"
	"tailscale.com/types/persist"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/multierr"
	"tailscale.com/util/systemd"
	"tailscale.com/wgengine/monitor"
)

// Direct is the client that connects to a tailcontrol server for a node.
type Direct struct {
	httpc                  *http.Client // HTTP client used to talk to tailcontrol
	dialer                 *tsdial.Dialer
	serverURL              string // URL of the tailcontrol server
	timeNow                func() time.Time
	lastPrintMap           time.Time
	newDecompressor        func() (Decompressor, error)
	keepAlive              bool
	logf                   logger.Logf
	linkMon                *monitor.Mon // or nil
	discoPubKey            key.DiscoPublic
	getMachinePrivKey      func() (key.MachinePrivate, error)
	debugFlags             []string
	keepSharerAndUserSplit bool
	skipIPForwardingCheck  bool
	pinger                 Pinger
	popBrowser             func(url string) // or nil

	mu             sync.Mutex        // mutex guards the following fields
	serverKey      key.MachinePublic // original ("legacy") nacl crypto_box-based public key
	serverNoiseKey key.MachinePublic

	sfGroup     singleflight.Group // protects noiseClient creation.
	noiseClient *noiseClient

	persist       persist.Persist
	authKey       string
	tryingNewKey  key.NodePrivate
	expiry        *time.Time
	hostinfo      *tailcfg.Hostinfo // always non-nil
	netinfo       *tailcfg.NetInfo
	endpoints     []tailcfg.Endpoint
	everEndpoints bool   // whether we've ever had non-empty endpoints
	localPort     uint16 // or zero to mean auto
	lastPingURL   string // last PingRequest.URL received, for dup suppression
}

type Options struct {
	Persist              persist.Persist                    // initial persistent data
	GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use
	ServerURL            string                             // URL of the tailcontrol server
	AuthKey              string                             // optional node auth key for auto registration
	TimeNow              func() time.Time                   // time.Now implementation used by Client
	Hostinfo             *tailcfg.Hostinfo                  // non-nil passes ownership, nil means to use default using os.Hostname, etc
	DiscoPublicKey       key.DiscoPublic
	NewDecompressor      func() (Decompressor, error)
	KeepAlive            bool
	Logf                 logger.Logf
	HTTPTestClient       *http.Client     // optional HTTP client to use (for tests only)
	DebugFlags           []string         // debug settings to send to control
	LinkMonitor          *monitor.Mon     // optional link monitor
	PopBrowserURL        func(url string) // optional func to open browser
	Dialer               *tsdial.Dialer   // non-nil

	// KeepSharerAndUserSplit controls whether the client
	// understands Node.Sharer. If false, the Sharer is mapped to the User.
	KeepSharerAndUserSplit bool

	// SkipIPForwardingCheck declares that the host's IP
	// forwarding works and should not be double-checked by the
	// controlclient package.
	SkipIPForwardingCheck bool

	// Pinger optionally specifies the Pinger to use to satisfy
	// MapResponse.PingRequest queries from the control plane.
	// If nil, PingRequest queries are not answered.
	Pinger Pinger
}
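
// A minimal way to construct a client (a sketch; the server URL, machineKey,
// and dialer below are illustrative placeholders supplied by the caller, not
// values defined in this package):
//
//	c, err := NewDirect(Options{
//		ServerURL:            "https://controlplane.example.com",
//		GetMachinePrivateKey: func() (key.MachinePrivate, error) { return machineKey, nil },
//		Logf:                 log.Printf,
//		Dialer:               dialer, // a *tsdial.Dialer owned by the caller
//	})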

// Pinger is a subset of the wgengine.Engine interface, containing just the Ping method.
type Pinger interface {
	// Ping is a request to start a ping with the peer handling the given IP and
	// then call cb with its ping latency & method.
	Ping(ip netaddr.IP, pingType tailcfg.PingType, cb func(*ipnstate.PingResult))
}

type Decompressor interface {
	DecodeAll(input, dst []byte) ([]byte, error)
	Close()
}
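
// A zstd decoder can satisfy Decompressor. For example (a sketch, assuming the
// caller pulls in github.com/klauspost/compress/zstd, which this file does not
// import):
//
//	NewDecompressor: func() (controlclient.Decompressor, error) {
//		return zstd.NewReader(nil)
//	},
//
// zstd.NewReader(nil) returns a *zstd.Decoder, whose DecodeAll and Close
// methods match this interface.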

// NewDirect returns a new Direct client.
func NewDirect(opts Options) (*Direct, error) {
	if opts.ServerURL == "" {
		return nil, errors.New("controlclient.New: no server URL specified")
	}
	if opts.GetMachinePrivateKey == nil {
		return nil, errors.New("controlclient.New: no GetMachinePrivateKey specified")
	}
	opts.ServerURL = strings.TrimRight(opts.ServerURL, "/")
	serverURL, err := url.Parse(opts.ServerURL)
	if err != nil {
		return nil, err
	}
	if opts.TimeNow == nil {
		opts.TimeNow = time.Now
	}
	if opts.Logf == nil {
		// TODO(apenwarr): remove this default and fail instead.
		// TODO(bradfitz): ... but then it shouldn't be in Options.
		opts.Logf = log.Printf
	}

	httpc := opts.HTTPTestClient
	if httpc == nil && runtime.GOOS == "js" {
		// In js/wasm, net/http.Transport (as of Go 1.18) will
		// only use the browser's Fetch API if you're using
		// the DefaultClient (or a client without dial hooks
		// etc set).
		httpc = http.DefaultClient
	}
	if httpc == nil {
		dnsCache := &dnscache.Resolver{
			Forward:          dnscache.Get().Forward, // use default cache's forwarder
			UseLastGood:      true,
			LookupIPFallback: dnsfallback.Lookup,
		}
		tr := http.DefaultTransport.(*http.Transport).Clone()
		tr.Proxy = tshttpproxy.ProxyFromEnvironment
		tshttpproxy.SetTransportGetProxyConnectHeader(tr)
		tr.TLSClientConfig = tlsdial.Config(serverURL.Hostname(), tr.TLSClientConfig)
		tr.DialContext = dnscache.Dialer(opts.Dialer.SystemDial, dnsCache)
		tr.DialTLSContext = dnscache.TLSDialer(opts.Dialer.SystemDial, dnsCache, tr.TLSClientConfig)
		tr.ForceAttemptHTTP2 = true
		// Disable implicit gzip compression; the various
		// handlers (register, map, set-dns, etc) do their own
		// zstd compression per naclbox.
		tr.DisableCompression = true
		httpc = &http.Client{Transport: tr}
	}

	c := &Direct{
		httpc:                  httpc,
		getMachinePrivKey:      opts.GetMachinePrivateKey,
		serverURL:              opts.ServerURL,
		timeNow:                opts.TimeNow,
		logf:                   opts.Logf,
		newDecompressor:        opts.NewDecompressor,
		keepAlive:              opts.KeepAlive,
		persist:                opts.Persist,
		authKey:                opts.AuthKey,
		discoPubKey:            opts.DiscoPublicKey,
		debugFlags:             opts.DebugFlags,
		keepSharerAndUserSplit: opts.KeepSharerAndUserSplit,
		linkMon:                opts.LinkMonitor,
		skipIPForwardingCheck:  opts.SkipIPForwardingCheck,
		pinger:                 opts.Pinger,
		popBrowser:             opts.PopBrowserURL,
		dialer:                 opts.Dialer,
	}
	if opts.Hostinfo == nil {
		c.SetHostinfo(hostinfo.New())
	} else {
		ni := opts.Hostinfo.NetInfo
		opts.Hostinfo.NetInfo = nil
		c.SetHostinfo(opts.Hostinfo)
		if ni != nil {
			c.SetNetInfo(ni)
		}
	}
	return c, nil
}

// Close closes the underlying Noise connection(s).
func (c *Direct) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.noiseClient != nil {
		if err := c.noiseClient.Close(); err != nil {
			return err
		}
	}
	c.noiseClient = nil
	return nil
}

// SetHostinfo clones the provided Hostinfo and remembers it for the
// next update. It reports whether the Hostinfo has changed.
func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool {
	if hi == nil {
		panic("nil Hostinfo")
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	if hi.Equal(c.hostinfo) {
		return false
	}
	c.hostinfo = hi.Clone()
	j, _ := json.Marshal(c.hostinfo)
	c.logf("[v1] HostInfo: %s", j)
	return true
}

// SetNetInfo clones the provided NetInfo and remembers it for the
// next update. It reports whether the NetInfo has changed.
func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool {
	if ni == nil {
		panic("nil NetInfo")
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	if reflect.DeepEqual(ni, c.netinfo) {
		return false
	}
	c.netinfo = ni.Clone()
	c.logf("NetInfo: %v", ni)
	return true
}

func (c *Direct) GetPersist() persist.Persist {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.persist
}

func (c *Direct) TryLogout(ctx context.Context) error {
	c.logf("[v1] direct.TryLogout()")

	mustRegen, newURL, err := c.doLogin(ctx, loginOpt{Logout: true})
	c.logf("[v1] TryLogout control response: mustRegen=%v, newURL=%v, err=%v", mustRegen, newURL, err)

	c.mu.Lock()
	c.persist = persist.Persist{}
	c.mu.Unlock()

	return err
}

func (c *Direct) TryLogin(ctx context.Context, t *tailcfg.Oauth2Token, flags LoginFlags) (url string, err error) {
	c.logf("[v1] direct.TryLogin(token=%v, flags=%v)", t != nil, flags)
	return c.doLoginOrRegen(ctx, loginOpt{Token: t, Flags: flags})
}

// WaitLoginURL sits in a long poll waiting for the user to authenticate at url.
//
// On success, newURL will be empty and err will be nil.
func (c *Direct) WaitLoginURL(ctx context.Context, url string) (newURL string, err error) {
	c.logf("[v1] direct.WaitLoginURL")
	return c.doLoginOrRegen(ctx, loginOpt{URL: url})
}

func (c *Direct) doLoginOrRegen(ctx context.Context, opt loginOpt) (newURL string, err error) {
	mustRegen, url, err := c.doLogin(ctx, opt)
	if err != nil {
		return url, err
	}
	if mustRegen {
		opt.Regen = true
		_, url, err = c.doLogin(ctx, opt)
	}
	return url, err
}

// SetExpirySooner attempts to shorten the expiry to the specified time.
func (c *Direct) SetExpirySooner(ctx context.Context, expiry time.Time) error {
	c.logf("[v1] direct.SetExpirySooner()")

	newURL, err := c.doLoginOrRegen(ctx, loginOpt{Expiry: &expiry})
	c.logf("[v1] SetExpirySooner control response: newURL=%v, err=%v", newURL, err)

	return err
}

type loginOpt struct {
	Token  *tailcfg.Oauth2Token
	Flags  LoginFlags
	Regen  bool // generate a new nodekey, can be overridden in doLogin
	URL    string
	Logout bool // set the expiry to the far past, expiring the node

	// Expiry, if non-nil, attempts to set the node expiry to the
	// specified time and cannot be used to extend the expiry.
	// It is ignored if Logout is set, since Logout works by setting an
	// expiry time in the far past.
	Expiry *time.Time
}
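
// loginOpt is filled in differently by each entry point above: TryLogin sets
// Token and Flags, WaitLoginURL sets URL, TryLogout sets Logout, and
// SetExpirySooner sets Expiry. doLogin then derives the register request from
// whichever combination it is given.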

// httpClient provides a common interface for the noiseClient and
// the NaCl box http.Client.
type httpClient interface {
	Do(req *http.Request) (*http.Response, error)
}

// hostInfoLocked returns a Clone of c.hostinfo and c.netinfo.
// It must only be called with c.mu held.
func (c *Direct) hostInfoLocked() *tailcfg.Hostinfo {
	hi := c.hostinfo.Clone()
	hi.NetInfo = c.netinfo.Clone()
	return hi
}

func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, newURL string, err error) {
	c.mu.Lock()
	persist := c.persist
	tryingNewKey := c.tryingNewKey
	serverKey := c.serverKey
	serverNoiseKey := c.serverNoiseKey
	authKey := c.authKey
	hi := c.hostInfoLocked()
	backendLogID := hi.BackendLogID
	expired := c.expiry != nil && !c.expiry.IsZero() && c.expiry.Before(c.timeNow())
	c.mu.Unlock()

	machinePrivKey, err := c.getMachinePrivKey()
	if err != nil {
		return false, "", fmt.Errorf("getMachinePrivKey: %w", err)
	}
	if machinePrivKey.IsZero() {
		return false, "", errors.New("getMachinePrivKey returned zero key")
	}

	regen := opt.Regen
	if opt.Logout {
		c.logf("logging out...")
	} else {
		if expired {
			c.logf("Old key expired -> regen=true")
			systemd.Status("key expired; run 'tailscale up' to authenticate")
			regen = true
		}
		if (opt.Flags & LoginInteractive) != 0 {
			c.logf("LoginInteractive -> regen=true")
			regen = true
		}
	}

	c.logf("doLogin(regen=%v, hasUrl=%v)", regen, opt.URL != "")
	if serverKey.IsZero() {
		keys, err := loadServerPubKeys(ctx, c.httpc, c.serverURL)
		if err != nil {
			return regen, opt.URL, err
		}
		c.logf("control server key from %s: ts2021=%s, legacy=%v", c.serverURL, keys.PublicKey.ShortString(), keys.LegacyPublicKey.ShortString())

		c.mu.Lock()
		c.serverKey = keys.LegacyPublicKey
		c.serverNoiseKey = keys.PublicKey
		c.mu.Unlock()
		serverKey = keys.LegacyPublicKey
		serverNoiseKey = keys.PublicKey

		// For servers supporting the Noise transport,
		// proactively shut down our TLS TCP connection.
		// We're not going to need it and it's nicer to the
		// server.
		if !serverNoiseKey.IsZero() {
			c.httpc.CloseIdleConnections()
		}
	}
	var oldNodeKey key.NodePublic
	switch {
	case opt.Logout:
		tryingNewKey = persist.PrivateNodeKey
	case opt.URL != "":
		// Nothing.
	case regen || persist.PrivateNodeKey.IsZero():
		c.logf("Generating a new nodekey.")
		persist.OldPrivateNodeKey = persist.PrivateNodeKey
		tryingNewKey = key.NewNode()
	default:
		// Try refreshing the current key first
		tryingNewKey = persist.PrivateNodeKey
	}
	if !persist.OldPrivateNodeKey.IsZero() {
		oldNodeKey = persist.OldPrivateNodeKey.Public()
	}

	if tryingNewKey.IsZero() {
		if opt.Logout {
			return false, "", errors.New("no nodekey to log out")
		}
		log.Fatalf("tryingNewKey is empty, give up")
	}
	if backendLogID == "" {
		err = errors.New("hostinfo: BackendLogID missing")
		return regen, opt.URL, err
	}
	now := time.Now().Round(time.Second)
	request := tailcfg.RegisterRequest{
		Version:    1,
		OldNodeKey: oldNodeKey,
		NodeKey:    tryingNewKey.Public(),
		Hostinfo:   hi,
		Followup:   opt.URL,
		Timestamp:  &now,
		Ephemeral:  (opt.Flags & LoginEphemeral) != 0,
	}
	if opt.Logout {
		request.Expiry = time.Unix(123, 0) // far in the past
	} else if opt.Expiry != nil {
		request.Expiry = *opt.Expiry
	}
	c.logf("RegisterReq: onode=%v node=%v fup=%v",
		request.OldNodeKey.ShortString(),
		request.NodeKey.ShortString(), opt.URL != "")
	request.Auth.Oauth2Token = opt.Token
	request.Auth.Provider = persist.Provider
	request.Auth.LoginName = persist.LoginName
	request.Auth.AuthKey = authKey
	err = signRegisterRequest(&request, c.serverURL, c.serverKey, machinePrivKey.Public())
	if err != nil {
		// If signing failed, clear all related fields
		request.SignatureType = tailcfg.SignatureNone
		request.Timestamp = nil
		request.DeviceCert = nil
		request.Signature = nil

		// Don't log the common error types. Signatures are not usually enabled,
		// so these are expected.
		if !errors.Is(err, errCertificateNotConfigured) && !errors.Is(err, errNoCertStore) {
			c.logf("RegisterReq sign error: %v", err)
		}
	}
	if debugRegister {
		j, _ := json.MarshalIndent(request, "", "\t")
		c.logf("RegisterRequest: %s", j)
	}

	// URL and httpc are protocol specific.
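	// With no Noise key known, the request is sealed with the legacy NaCl
	// box scheme and POSTed to /machine/<machine public key hex> over
	// c.httpc; otherwise it goes to /machine/register over the Noise
	// (ts2021) client, with Version bumped to the current capability
	// version.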
	var url string
	var httpc httpClient
	if serverNoiseKey.IsZero() {
		httpc = c.httpc
		url = fmt.Sprintf("%s/machine/%s", c.serverURL, machinePrivKey.Public().UntypedHexString())
	} else {
		request.Version = tailcfg.CurrentCapabilityVersion
		httpc, err = c.getNoiseClient()
		if err != nil {
			return regen, opt.URL, fmt.Errorf("getNoiseClient: %w", err)
		}
		url = fmt.Sprintf("%s/machine/register", c.serverURL)
		url = strings.Replace(url, "http:", "https:", 1)
	}
	bodyData, err := encode(request, serverKey, serverNoiseKey, machinePrivKey)
	if err != nil {
		return regen, opt.URL, err
	}
	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyData))
	if err != nil {
		return regen, opt.URL, err
	}
	res, err := httpc.Do(req)
	if err != nil {
		return regen, opt.URL, fmt.Errorf("register request: %w", err)
	}
	if res.StatusCode != 200 {
		msg, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		return regen, opt.URL, fmt.Errorf("register request: http %d: %.200s",
			res.StatusCode, strings.TrimSpace(string(msg)))
	}
	resp := tailcfg.RegisterResponse{}
	if err := decode(res, &resp, serverKey, serverNoiseKey, machinePrivKey); err != nil {
		c.logf("error decoding RegisterResponse with server key %s and machine key %s: %v", serverKey, machinePrivKey.Public(), err)
		return regen, opt.URL, fmt.Errorf("register request: %v", err)
	}
	if debugRegister {
		j, _ := json.MarshalIndent(resp, "", "\t")
		c.logf("RegisterResponse: %s", j)
	}

	// Log without PII:
	c.logf("RegisterReq: got response; nodeKeyExpired=%v, machineAuthorized=%v; authURL=%v",
		resp.NodeKeyExpired, resp.MachineAuthorized, resp.AuthURL != "")

	if resp.Error != "" {
		return false, "", UserVisibleError(resp.Error)
	}
	if resp.NodeKeyExpired {
		if regen {
			return true, "", fmt.Errorf("weird: regen=true but server says NodeKeyExpired: %v", request.NodeKey)
		}
		c.logf("server reports new node key %v has expired",
			request.NodeKey.ShortString())
		return true, "", nil
	}
	if resp.Login.Provider != "" {
		persist.Provider = resp.Login.Provider
	}
	if resp.Login.LoginName != "" {
		persist.LoginName = resp.Login.LoginName
	}

	// TODO(crawshaw): RegisterResponse should be able to mechanically
	// communicate some extra instructions from the server:
	// - new node key required
	// - machine key no longer supported
	// - user is disabled

	if resp.AuthURL != "" {
		c.logf("AuthURL is %v", resp.AuthURL)
	} else {
		c.logf("[v1] No AuthURL")
	}

	c.mu.Lock()
	if resp.AuthURL == "" {
		// key rotation is complete
		persist.PrivateNodeKey = tryingNewKey
	} else {
		// save it for the retry-with-URL
		c.tryingNewKey = tryingNewKey
	}
	c.persist = persist
	c.mu.Unlock()

	if err != nil {
		return regen, "", err
	}
	if ctx.Err() != nil {
		return regen, "", ctx.Err()
	}
	return false, resp.AuthURL, nil
}

func sameEndpoints(a, b []tailcfg.Endpoint) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// newEndpoints acquires c.mu and sets the local port and endpoints and reports
// whether they've changed.
//
// It does not retain the provided slice.
func (c *Direct) newEndpoints(localPort uint16, endpoints []tailcfg.Endpoint) (changed bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Nothing new?
	if c.localPort == localPort && sameEndpoints(c.endpoints, endpoints) {
		return false // unchanged
	}
	var epStrs []string
	for _, ep := range endpoints {
		epStrs = append(epStrs, ep.Addr.String())
	}
	c.logf("[v2] client.newEndpoints(%v, %v)", localPort, epStrs)
	c.localPort = localPort
	c.endpoints = append(c.endpoints[:0], endpoints...)
	if len(endpoints) > 0 {
		c.everEndpoints = true
	}
	return true // changed
}

// SetEndpoints updates the list of locally advertised endpoints.
// It won't be replicated to the server until a *fresh* call to PollNetMap().
// You don't need to restart PollNetMap if we return changed==false.
func (c *Direct) SetEndpoints(localPort uint16, endpoints []tailcfg.Endpoint) (changed bool) {
	// (no log message on function entry, because it clutters the logs
	// if endpoints haven't changed. newEndpoints() will log it.)
	return c.newEndpoints(localPort, endpoints)
}

func inTest() bool { return flag.Lookup("test.v") != nil }

// PollNetMap makes a /map request to download the network map, calling cb with
// each new netmap.
//
// maxPolls is how many network maps to download; common values are 1
// or -1 (to keep a long-poll query open to the server).
func (c *Direct) PollNetMap(ctx context.Context, maxPolls int, cb func(*netmap.NetworkMap)) error {
	return c.sendMapRequest(ctx, maxPolls, cb)
}
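
// A typical long-poll use (a sketch; error handling and the surrounding
// context setup are elided):
//
//	err := c.PollNetMap(ctx, -1, func(nm *netmap.NetworkMap) {
//		// react to each new network map
//	})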

// SendLiteMapUpdate makes a /map request to inform the server of our latest
// state, but does not fetch anything. It returns an error if the server did
// not return a successful 200 OK response.
func (c *Direct) SendLiteMapUpdate(ctx context.Context) error {
	return c.sendMapRequest(ctx, 1, nil)
}

// If we go more than pollTimeout without hearing from the server,
// end the long poll. We should be receiving a keep alive ping
// every minute.
const pollTimeout = 120 * time.Second

// cb nil means to omit peers.
func (c *Direct) sendMapRequest(ctx context.Context, maxPolls int, cb func(*netmap.NetworkMap)) error {
	metricMapRequests.Add(1)
	metricMapRequestsActive.Add(1)
	defer metricMapRequestsActive.Add(-1)
	if maxPolls == -1 {
		metricMapRequestsPoll.Add(1)
	} else {
		metricMapRequestsLite.Add(1)
	}

	c.mu.Lock()
	persist := c.persist
	serverURL := c.serverURL
	serverKey := c.serverKey
	serverNoiseKey := c.serverNoiseKey
	hi := c.hostInfoLocked()
	backendLogID := hi.BackendLogID
	localPort := c.localPort
	var epStrs []string
	var epTypes []tailcfg.EndpointType
	for _, ep := range c.endpoints {
		epStrs = append(epStrs, ep.Addr.String())
		epTypes = append(epTypes, ep.Type)
	}
	everEndpoints := c.everEndpoints
	c.mu.Unlock()

	machinePrivKey, err := c.getMachinePrivKey()
	if err != nil {
		return fmt.Errorf("getMachinePrivKey: %w", err)
	}
	if machinePrivKey.IsZero() {
		return errors.New("getMachinePrivKey returned zero key")
	}

	if persist.PrivateNodeKey.IsZero() {
		return errors.New("privateNodeKey is zero")
	}
	if backendLogID == "" {
		return errors.New("hostinfo: BackendLogID missing")
	}

	allowStream := maxPolls != 1
	c.logf("[v1] PollNetMap: stream=%v :%v ep=%v", allowStream, localPort, epStrs)

	vlogf := logger.Discard
	if Debug.NetMap {
		// TODO(bradfitz): update this to use "[v2]" prefix perhaps? but we don't
		// want to upload it always.
		vlogf = c.logf
	}

	request := &tailcfg.MapRequest{
		Version:       tailcfg.CurrentCapabilityVersion,
		KeepAlive:     c.keepAlive,
		NodeKey:       persist.PrivateNodeKey.Public(),
		DiscoKey:      c.discoPubKey,
		Endpoints:     epStrs,
		EndpointTypes: epTypes,
		Stream:        allowStream,
		Hostinfo:      hi,
		DebugFlags:    c.debugFlags,
		OmitPeers:     cb == nil,
	}
	var extraDebugFlags []string
	if hi != nil && c.linkMon != nil && !c.skipIPForwardingCheck &&
		ipForwardingBroken(hi.RoutableIPs, c.linkMon.InterfaceState()) {
		extraDebugFlags = append(extraDebugFlags, "warn-ip-forwarding-off")
	}
	if health.RouterHealth() != nil {
		extraDebugFlags = append(extraDebugFlags, "warn-router-unhealthy")
	}
	if health.NetworkCategoryHealth() != nil {
		extraDebugFlags = append(extraDebugFlags, "warn-network-category-unhealthy")
	}
	if hostinfo.DisabledEtcAptSource() {
		extraDebugFlags = append(extraDebugFlags, "warn-etc-apt-source-disabled")
	}
	if len(extraDebugFlags) > 0 {
		old := request.DebugFlags
		request.DebugFlags = append(old[:len(old):len(old)], extraDebugFlags...)
	}
	if c.newDecompressor != nil {
		request.Compress = "zstd"
	}
	// On initial startup before we know our endpoints, set the ReadOnly flag
	// to tell the control server not to distribute out our (empty) endpoints to peers.
	// Presumably we'll learn our endpoints in a half second and do another post
	// with useful results. The first POST just gets us the DERP map which we
	// need to do the STUN queries to discover our endpoints.
	// TODO(bradfitz): we skip this optimization in tests, though,
	// because the e2e tests are currently hyperspecific about the
	// ordering of things. The e2e tests need love.
	if len(epStrs) == 0 && !everEndpoints && !inTest() {
		request.ReadOnly = true
	}

	bodyData, err := encode(request, serverKey, serverNoiseKey, machinePrivKey)
	if err != nil {
		vlogf("netmap: encode: %v", err)
		return err
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	machinePubKey := machinePrivKey.Public()
	t0 := time.Now()

	// URL and httpc are protocol specific.
	var url string
	var httpc httpClient
	if serverNoiseKey.IsZero() {
		httpc = c.httpc
		url = fmt.Sprintf("%s/machine/%s/map", serverURL, machinePubKey.UntypedHexString())
	} else {
		httpc, err = c.getNoiseClient()
		if err != nil {
			return fmt.Errorf("getNoiseClient: %w", err)
		}
		url = fmt.Sprintf("%s/machine/map", serverURL)
		url = strings.Replace(url, "http:", "https:", 1)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyData))
	if err != nil {
		return err
	}

	res, err := httpc.Do(req)
	if err != nil {
		vlogf("netmap: Do: %v", err)
		return err
	}
	vlogf("netmap: Do = %v after %v", res.StatusCode, time.Since(t0).Round(time.Millisecond))
	if res.StatusCode != 200 {
		msg, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		return fmt.Errorf("initial fetch failed %d: %.200s",
			res.StatusCode, strings.TrimSpace(string(msg)))
	}
	defer res.Body.Close()

	health.NoteMapRequestHeard(request)

	if cb == nil {
		io.Copy(ioutil.Discard, res.Body)
		return nil
	}

	timeout := time.NewTimer(pollTimeout)
	timeoutReset := make(chan struct{})
	pollDone := make(chan struct{})
	defer close(pollDone)
	go func() {
		for {
			select {
			case <-pollDone:
				vlogf("netmap: ending timeout goroutine")
				return
			case <-timeout.C:
				c.logf("map response long-poll timed out!")
				cancel()
				return
			case <-timeoutReset:
				if !timeout.Stop() {
					select {
					case <-timeout.C:
					case <-pollDone:
						vlogf("netmap: ending timeout goroutine")
						return
					}
				}
				vlogf("netmap: reset timeout timer")
				timeout.Reset(pollTimeout)
			}
		}
	}()

	sess := newMapSession(persist.PrivateNodeKey)
	sess.logf = c.logf
	sess.vlogf = vlogf
	sess.machinePubKey = machinePubKey
	sess.keepSharerAndUserSplit = c.keepSharerAndUserSplit

	// If allowStream, then the server will use an HTTP long poll to
	// return incremental results. There is always one response right
	// away, followed by a delay, and eventually others.
	// If !allowStream, it'll still send the first result in exactly
	// the same format before just closing the connection.
	// We can use this same read loop either way.
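	// Each frame on the wire is a 4-byte little-endian length followed by
	// that many bytes of encoded MapResponse payload, which decodeMsg
	// unpacks below.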
|
|
|
|
var msg []byte
|
|
|
|
for i := 0; i < maxPolls || maxPolls < 0; i++ {
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: starting size read after %v (poll %v)", time.Since(t0).Round(time.Millisecond), i)
|
2020-02-05 22:16:58 +00:00
|
|
|
var siz [4]byte
|
|
|
|
if _, err := io.ReadFull(res.Body, siz[:]); err != nil {
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: size read error after %v: %v", time.Since(t0).Round(time.Millisecond), err)
|
2020-02-05 22:16:58 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
size := binary.LittleEndian.Uint32(siz[:])
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: read size %v after %v", size, time.Since(t0).Round(time.Millisecond))
|
2020-02-05 22:16:58 +00:00
|
|
|
msg = append(msg[:0], make([]byte, size)...)
|
|
|
|
if _, err := io.ReadFull(res.Body, msg); err != nil {
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: body read error: %v", err)
|
2020-02-05 22:16:58 +00:00
|
|
|
return err
|
|
|
|
}
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond))
|
2020-02-05 22:16:58 +00:00
|
|
|
|
|
|
|
var resp tailcfg.MapResponse
|
2021-09-03 20:17:46 +00:00
|
|
|
if err := c.decodeMsg(msg, &resp, machinePrivKey); err != nil {
|
2020-04-11 16:22:33 +00:00
|
|
|
vlogf("netmap: decode error: %v")
|
2020-02-05 22:16:58 +00:00
|
|
|
return err
|
|
|
|
}

		metricMapResponseMessages.Add(1)

		if allowStream {
			health.GotStreamedMapResponse()
		}

		if pr := resp.PingRequest; pr != nil && c.isUniquePingRequest(pr) {
			metricMapResponsePings.Add(1)
			go answerPing(c.logf, c.httpc, pr, c.pinger)
		}
		if u := resp.PopBrowserURL; u != "" && u != sess.lastPopBrowserURL {
			sess.lastPopBrowserURL = u
			if c.popBrowser != nil {
				c.logf("netmap: control says to open URL %v; opening...", u)
				c.popBrowser(u)
			} else {
				c.logf("netmap: control says to open URL %v; no popBrowser func", u)
			}
		}
		if resp.ControlTime != nil && !resp.ControlTime.IsZero() {
			c.logf.JSON(1, "controltime", resp.ControlTime.UTC())
		}
		if resp.KeepAlive {
			vlogf("netmap: got keep-alive")
		} else {
			vlogf("netmap: got new map")
		}

		select {
		case timeoutReset <- struct{}{}:
			vlogf("netmap: sent timer reset")
		case <-ctx.Done():
			c.logf("[v1] netmap: not resetting timer; context done: %v", ctx.Err())
			return ctx.Err()
		}
		if resp.KeepAlive {
			metricMapResponseKeepAlives.Add(1)
			continue
		}

		metricMapResponseMap.Add(1)
		if i > 0 {
			metricMapResponseMapDelta.Add(1)
		}

		hasDebug := resp.Debug != nil
		// Being conservative here: if Debug is not present, treat DisableUPnP as false.
		controlknobs.SetDisableUPnP(hasDebug && resp.Debug.DisableUPnP.EqualBool(true))
		if hasDebug {
			if code := resp.Debug.Exit; code != nil {
				c.logf("exiting process with status %v per controlplane", *code)
				os.Exit(*code)
			}
			if resp.Debug.DisableLogTail {
				logtail.Disable()
			}
			if resp.Debug.LogHeapPprof {
				go logheap.LogHeap(resp.Debug.LogHeapURL)
			}
			if resp.Debug.GoroutineDumpURL != "" {
				go dumpGoroutinesToURL(c.httpc, resp.Debug.GoroutineDumpURL)
			}
			setControlAtomic(&controlUseDERPRoute, resp.Debug.DERPRoute)
			setControlAtomic(&controlTrimWGConfig, resp.Debug.TrimWGConfig)
			if sleep := time.Duration(resp.Debug.SleepSeconds * float64(time.Second)); sleep > 0 {
				if err := sleepAsRequested(ctx, c.logf, timeoutReset, sleep); err != nil {
					return err
				}
			}
		}

		nm := sess.netmapForResponse(&resp)
		if nm.SelfNode == nil {
			c.logf("MapResponse lacked node")
			return errors.New("MapResponse lacked node")
		}

		if Debug.StripEndpoints {
			for _, p := range resp.Peers {
				p.Endpoints = nil
			}
		}
		if Debug.StripCaps {
			nm.SelfNode.Capabilities = nil
		}

		// Get latest localPort. This might've changed if
		// a lite map update occurred meanwhile. This only affects
		// the end-to-end test.
		// TODO(bradfitz): remove the NetworkMap.LocalPort field entirely.
		c.mu.Lock()
		nm.LocalPort = c.localPort
		c.mu.Unlock()

		// Occasionally print the netmap header.
		// This is handy for debugging, and our logs processing
		// pipeline depends on it. (TODO: Remove this dependency.)
		// Code elsewhere prints netmap diffs every time they are received.
		now := c.timeNow()
		if now.Sub(c.lastPrintMap) >= 5*time.Minute {
			c.lastPrintMap = now
			c.logf("[v1] new network map[%d]:\n%s", i, nm.VeryConcise())
		}

		c.mu.Lock()
		c.expiry = &nm.Expiry
		c.mu.Unlock()

		cb(nm)
	}
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return nil
}

// decode JSON-decodes res.Body into v. If serverNoiseKey is not specified,
// it uses serverKey and mkey to open the NaCl crypto-box message first.
func decode(res *http.Response, v any, serverKey, serverNoiseKey key.MachinePublic, mkey key.MachinePrivate) error {
	defer res.Body.Close()
	msg, err := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
	if err != nil {
		return err
	}
	if res.StatusCode != 200 {
		return fmt.Errorf("%d: %v", res.StatusCode, string(msg))
	}
	if !serverNoiseKey.IsZero() {
		return json.Unmarshal(msg, v)
	}
	return decodeMsg(msg, v, serverKey, mkey)
}

var (
	debugMap      = envknob.Bool("TS_DEBUG_MAP")
	debugRegister = envknob.Bool("TS_DEBUG_REGISTER")
)

var jsonEscapedZero = []byte(`\u0000`)

// decodeMsg decompresses msg (if a decompressor is configured) and unmarshals it into v.
// If c.serverNoiseKey is not specified, it first decrypts msg from the
// NaCl crypto-box using c.serverKey and mkey.
func (c *Direct) decodeMsg(msg []byte, v any, mkey key.MachinePrivate) error {
	c.mu.Lock()
	serverKey := c.serverKey
	serverNoiseKey := c.serverNoiseKey
	c.mu.Unlock()

	var decrypted []byte
	if serverNoiseKey.IsZero() {
		var ok bool
		decrypted, ok = mkey.OpenFrom(serverKey, msg)
		if !ok {
			return errors.New("cannot decrypt response")
		}
	} else {
		decrypted = msg
	}
	var b []byte
	if c.newDecompressor == nil {
		b = decrypted
	} else {
		decoder, err := c.newDecompressor()
		if err != nil {
			return err
		}
		defer decoder.Close()
		b, err = decoder.DecodeAll(decrypted, nil)
		if err != nil {
			return err
		}
	}
	if debugMap {
		var buf bytes.Buffer
		json.Indent(&buf, b, "", "    ")
		log.Printf("MapResponse: %s", buf.Bytes())
	}

	if bytes.Contains(b, jsonEscapedZero) {
		log.Printf("[unexpected] zero byte in controlclient.Direct.decodeMsg into %T: %q", v, b)
	}
	if err := json.Unmarshal(b, v); err != nil {
		return fmt.Errorf("response: %v", err)
	}
	return nil
}

func decodeMsg(msg []byte, v any, serverKey key.MachinePublic, machinePrivKey key.MachinePrivate) error {
	decrypted, ok := machinePrivKey.OpenFrom(serverKey, msg)
	if !ok {
		return errors.New("cannot decrypt response")
	}
	if bytes.Contains(decrypted, jsonEscapedZero) {
		log.Printf("[unexpected] zero byte in controlclient decodeMsg into %T: %q", v, decrypted)
	}
	if err := json.Unmarshal(decrypted, v); err != nil {
		return fmt.Errorf("response: %v", err)
	}
	return nil
}

// encode JSON-encodes v. If serverNoiseKey is not specified, it uses serverKey and mkey to
// seal the message into a NaCl crypto-box.
func encode(v any, serverKey, serverNoiseKey key.MachinePublic, mkey key.MachinePrivate) ([]byte, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	if debugMap {
		if _, ok := v.(*tailcfg.MapRequest); ok {
			log.Printf("MapRequest: %s", b)
		}
	}
	if !serverNoiseKey.IsZero() {
		return b, nil
	}
	return mkey.SealTo(serverKey, b), nil
}
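
// exampleSealRoundTrip is a purely illustrative sketch, not called anywhere in
// this package: it shows how encode and the package-level decodeMsg pair up on
// the legacy (non-Noise) path. A client seals a JSON message to the server's
// machine key, and the holder of the server's private key opens and unmarshals
// it. Both key pairs below are freshly generated stand-ins, not real
// control-plane keys.
func exampleSealRoundTrip() error {
	clientPriv := key.NewMachine()   // stand-in for the node's machine key
	serverPriv := key.NewMachine()   // stand-in for the control server's key
	var noNoiseKey key.MachinePublic // zero value selects the NaCl-box path in encode

	req := &tailcfg.MapRequest{Version: tailcfg.CurrentCapabilityVersion}
	sealed, err := encode(req, serverPriv.Public(), noNoiseKey, clientPriv)
	if err != nil {
		return err
	}

	// "Server side" of the exchange: open with its own private key and the
	// client's public key, then JSON-unmarshal the payload.
	var got tailcfg.MapRequest
	return decodeMsg(sealed, &got, clientPriv.Public(), serverPriv)
}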

func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string) (*tailcfg.OverTLSPublicKeyResponse, error) {
	keyURL := fmt.Sprintf("%v/key?v=%d", serverURL, tailcfg.CurrentCapabilityVersion)
	req, err := http.NewRequestWithContext(ctx, "GET", keyURL, nil)
	if err != nil {
		return nil, fmt.Errorf("create control key request: %v", err)
	}
	res, err := httpc.Do(req)
	if err != nil {
		return nil, fmt.Errorf("fetch control key: %v", err)
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(io.LimitReader(res.Body, 64<<10))
	if err != nil {
		return nil, fmt.Errorf("fetch control key response: %v", err)
	}
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("fetch control key: %d", res.StatusCode)
	}
	var out tailcfg.OverTLSPublicKeyResponse
	jsonErr := json.Unmarshal(b, &out)
	if jsonErr == nil {
		if runtime.GOOS == "js" {
			// As of 2022-05-20 it's not possible for js/wasm to make a bidi
			// Noise connection to the control plane. Instead, for now, pretend
			// like the server can't do Noise to force use of the old protocol.
			out.PublicKey = key.MachinePublic{}
		}
		return &out, nil
	}

	// Some old control servers might not be updated to send the new format.
	// Accept the old pre-JSON format too.
	out = tailcfg.OverTLSPublicKeyResponse{}
	k, err := key.ParseMachinePublicUntyped(mem.B(b))
	if err != nil {
		return nil, multierr.New(jsonErr, err)
	}
	out.LegacyPublicKey = k
	return &out, nil
}
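
// exampleProbeControlKeys is an illustrative sketch (not used by the client
// itself) of how a caller decides between the Noise-based protocol and the
// legacy NaCl-box protocol from loadServerPubKeys: a non-zero PublicKey
// advertises Noise support, otherwise only LegacyPublicKey is usable. The
// server URL is a placeholder.
func exampleProbeControlKeys(ctx context.Context) (useNoise bool, err error) {
	keys, err := loadServerPubKeys(ctx, http.DefaultClient, "https://controlplane.example.com")
	if err != nil {
		return false, err
	}
	return !keys.PublicKey.IsZero(), nil
}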

// Debug contains temporary internal-only debug knobs.
// They're unexported to not draw attention to them.
var Debug = initDebug()

type debug struct {
	NetMap         bool
	ProxyDNS       bool
	Disco          bool
	StripEndpoints bool // strip endpoints from control (only use disco messages)
	StripCaps      bool // strip all local node's control-provided capabilities
}

func initDebug() debug {
	return debug{
		NetMap:         envknob.Bool("TS_DEBUG_NETMAP"),
		ProxyDNS:       envknob.Bool("TS_DEBUG_PROXY_DNS"),
		StripEndpoints: envknob.Bool("TS_DEBUG_STRIP_ENDPOINTS"),
		StripCaps:      envknob.Bool("TS_DEBUG_STRIP_CAPS"),
		Disco:          envknob.BoolDefaultTrue("TS_DEBUG_USE_DISCO"),
	}
}
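
// For local debugging, these knobs are read from the process environment at
// startup, so they are typically set before launching the daemon. The
// invocation below is only an illustration of the pattern, not a documented
// command line:
//
//	TS_DEBUG_NETMAP=1 TS_DEBUG_STRIP_ENDPOINTS=1 tailscaled ...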

var clockNow = time.Now

// opt.Bool configs from control.
var (
	controlUseDERPRoute atomic.Value
	controlTrimWGConfig atomic.Value
)

func setControlAtomic(dst *atomic.Value, v opt.Bool) {
	old, ok := dst.Load().(opt.Bool)
	if !ok || old != v {
		dst.Store(v)
	}
}

// DERPRouteFlag reports the last reported value from control for whether
// DERP route optimization (Issue 150) should be enabled.
func DERPRouteFlag() opt.Bool {
	v, _ := controlUseDERPRoute.Load().(opt.Bool)
	return v
}

// TrimWGConfig reports the last reported value from control for whether
// we should do lazy wireguard configuration.
func TrimWGConfig() opt.Bool {
	v, _ := controlTrimWGConfig.Load().(opt.Bool)
	return v
}
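
// exampleWantTrimWGConfig is an illustrative sketch (not referenced elsewhere
// in this file) of how callers are expected to consume these opt.Bool flags:
// EqualBool only reports true when control has explicitly set the value, so an
// unset flag falls through to the caller's own default behavior.
func exampleWantTrimWGConfig() bool {
	return TrimWGConfig().EqualBool(true)
}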

// ipForwardingBroken reports whether the system's IP forwarding is disabled
// and will definitely not work for the routes provided.
//
// It should not return false positives.
//
// TODO(bradfitz): Change controlclient.Options.SkipIPForwardingCheck into a
// func([]netaddr.IPPrefix) error signature instead.
func ipForwardingBroken(routes []netaddr.IPPrefix, state *interfaces.State) bool {
	warn, err := netutil.CheckIPForwarding(routes, state)
	if err != nil {
		// Oh well, we tried. This is just for debugging.
		// We don't want false positives.
		// TODO: maybe we want a different warning for inability to check?
		return false
	}
	return warn != nil
}

// isUniquePingRequest reports whether pr contains a new PingRequest.URL
// not already handled, noting its value when returning true.
func (c *Direct) isUniquePingRequest(pr *tailcfg.PingRequest) bool {
	if pr == nil || pr.URL == "" {
		// Bogus.
		return false
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if pr.URL == c.lastPingURL {
		return false
	}
	c.lastPingURL = pr.URL
	return true
}

func answerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger) {
	if pr.URL == "" {
		logf("invalid PingRequest with no URL")
		return
	}
	if pr.Types == "" {
		answerHeadPing(logf, c, pr)
		return
	}
	for _, t := range strings.Split(pr.Types, ",") {
		switch pt := tailcfg.PingType(t); pt {
		case tailcfg.PingTSMP, tailcfg.PingDisco, tailcfg.PingICMP:
			go doPingerPing(logf, c, pr, pinger, pt)
		// TODO(tailscale/corp#754)
		// case "peerapi":
		default:
			logf("unsupported ping request type: %q", t)
		}
	}
}

func answerHeadPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "HEAD", pr.URL, nil)
	if err != nil {
		logf("answerHeadPing: NewRequestWithContext: %v", err)
		return
	}
	if pr.Log {
		logf("answerHeadPing: sending HEAD ping to %v ...", pr.URL)
	}
	t0 := time.Now()
	_, err = c.Do(req)
	d := time.Since(t0).Round(time.Millisecond)
	if err != nil {
		logf("answerHeadPing error: %v to %v (after %v)", err, pr.URL, d)
	} else if pr.Log {
		logf("answerHeadPing complete to %v (after %v)", pr.URL, d)
	}
}

func sleepAsRequested(ctx context.Context, logf logger.Logf, timeoutReset chan<- struct{}, d time.Duration) error {
	const maxSleep = 5 * time.Minute
	if d > maxSleep {
		logf("sleeping for %v, capped from server-requested %v ...", maxSleep, d)
		d = maxSleep
	} else {
		logf("sleeping for server-requested %v ...", d)
	}

	ticker := time.NewTicker(pollTimeout / 2)
	defer ticker.Stop()
	timer := time.NewTimer(d)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
			return nil
		case <-ticker.C:
			select {
			case timeoutReset <- struct{}{}:
			case <-timer.C:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
}

// getNoiseClient returns the noise client, creating one if one doesn't exist.
func (c *Direct) getNoiseClient() (*noiseClient, error) {
	c.mu.Lock()
	serverNoiseKey := c.serverNoiseKey
	nc := c.noiseClient
	c.mu.Unlock()
	if serverNoiseKey.IsZero() {
		return nil, errors.New("zero serverNoiseKey")
	}
	if nc != nil {
		return nc, nil
	}
	np, err, _ := c.sfGroup.Do("noise", func() (any, error) {
		k, err := c.getMachinePrivKey()
		if err != nil {
			return nil, err
		}

		nc, err = newNoiseClient(k, serverNoiseKey, c.serverURL, c.dialer)
		if err != nil {
			return nil, err
		}
		c.mu.Lock()
		defer c.mu.Unlock()
		c.noiseClient = nc
		return nc, nil
	})
	if err != nil {
		return nil, err
	}
	return np.(*noiseClient), nil
}

// setDNSNoise sends the SetDNSRequest to the control plane server over Noise,
// requesting a DNS record be created or updated.
func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) error {
	newReq := *req
	newReq.Version = tailcfg.CurrentCapabilityVersion
	np, err := c.getNoiseClient()
	if err != nil {
		return err
	}
	bodyData, err := json.Marshal(newReq)
	if err != nil {
		return err
	}
	res, err := np.Post(fmt.Sprintf("https://%v/%v", np.serverHost, "machine/set-dns"), "application/json", bytes.NewReader(bodyData))
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		msg, _ := ioutil.ReadAll(res.Body)
		return fmt.Errorf("set-dns response: %v, %.200s", res.Status, strings.TrimSpace(string(msg)))
	}
	var setDNSRes tailcfg.SetDNSResponse
	if err := json.NewDecoder(res.Body).Decode(&setDNSRes); err != nil {
		c.logf("error decoding SetDNSResponse: %v", err)
		return fmt.Errorf("set-dns-response: %w", err)
	}

	return nil
}

// noiseConfigured reports whether the client can communicate with Control
// over Noise.
func (c *Direct) noiseConfigured() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return !c.serverNoiseKey.IsZero()
}

// SetDNS sends the SetDNSRequest to the control plane server,
// requesting a DNS record be created or updated.
func (c *Direct) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) (err error) {
	metricSetDNS.Add(1)
	defer func() {
		if err != nil {
			metricSetDNSError.Add(1)
		}
	}()
	if c.noiseConfigured() {
		return c.setDNSNoise(ctx, req)
	}
	c.mu.Lock()
	serverKey := c.serverKey
	c.mu.Unlock()

	if serverKey.IsZero() {
		return errors.New("zero serverKey")
	}
	machinePrivKey, err := c.getMachinePrivKey()
	if err != nil {
		return fmt.Errorf("getMachinePrivKey: %w", err)
	}
	if machinePrivKey.IsZero() {
		return errors.New("getMachinePrivKey returned zero key")
	}

	// TODO(maisem): dedupe this codepath from SetDNSNoise.
	var serverNoiseKey key.MachinePublic
	bodyData, err := encode(req, serverKey, serverNoiseKey, machinePrivKey)
	if err != nil {
		return err
	}
	body := bytes.NewReader(bodyData)

	u := fmt.Sprintf("%s/machine/%s/set-dns", c.serverURL, machinePrivKey.Public().UntypedHexString())
	hreq, err := http.NewRequestWithContext(ctx, "POST", u, body)
	if err != nil {
		return err
	}
	res, err := c.httpc.Do(hreq)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		msg, _ := ioutil.ReadAll(res.Body)
		return fmt.Errorf("set-dns response: %v, %.200s", res.Status, strings.TrimSpace(string(msg)))
	}
	var setDNSRes tailcfg.SetDNSResponse
	if err := decode(res, &setDNSRes, serverKey, serverNoiseKey, machinePrivKey); err != nil {
		c.logf("error decoding SetDNSResponse with server key %s and machine key %s: %v", serverKey, machinePrivKey.Public(), err)
		return fmt.Errorf("set-dns-response: %w", err)
	}

	return nil
}
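
// As an illustration of the payload both SetDNS and setDNSNoise carry, a
// caller preparing an ACME dns-01 style update might build a request like the
// following. The field names reflect tailcfg.SetDNSRequest as of this writing,
// and the values are placeholders only:
//
//	req := &tailcfg.SetDNSRequest{
//		NodeKey: nodeKey, // the node's public key
//		Name:    "_acme-challenge.example-host.example.ts.net",
//		Type:    "TXT",
//		Value:   acmeToken,
//	}
//	err := c.SetDNS(ctx, req)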

func (c *Direct) DoNoiseRequest(req *http.Request) (*http.Response, error) {
	nc, err := c.getNoiseClient()
	if err != nil {
		return nil, err
	}
	return nc.Do(req)
}

// doPingerPing sends a Ping to pr.IP using pinger, and sends an http request back to
// pr.URL with ping response data.
func doPingerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger, pingType tailcfg.PingType) {
	if pr.URL == "" || pr.IP.IsZero() || pinger == nil {
		logf("invalid ping request: missing url, ip or pinger")
		return
	}
	start := time.Now()
	pinger.Ping(pr.IP, pingType, func(res *ipnstate.PingResult) {
		// The error from postPingResult is currently ignored; if posting the
		// result fails we simply give up.
		postPingResult(start, logf, c, pr, res.ToPingResponse(pingType))
	})
}

func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, res *tailcfg.PingResponse) error {
	duration := time.Since(start)
	if pr.Log {
		if res.Err == "" {
			logf("ping to %v completed in %vs; pinger.Ping took %v", pr.IP, res.LatencySeconds, duration)
		} else {
			logf("ping to %v failed after %v: %v", pr.IP, duration, res.Err)
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	jsonPingRes, err := json.Marshal(res)
	if err != nil {
		return err
	}
	// Send the results of the ping back to the control URL.
	req, err := http.NewRequestWithContext(ctx, "POST", pr.URL, bytes.NewReader(jsonPingRes))
	if err != nil {
		return fmt.Errorf("http.NewRequestWithContext(%q): %w", pr.URL, err)
	}
	if pr.Log {
		logf("postPingResult: sending ping results to %v ...", pr.URL)
	}
	t0 := time.Now()
	_, err = c.Do(req)
	d := time.Since(t0).Round(time.Millisecond)
	if err != nil {
		return fmt.Errorf("postPingResult error: %w to %v (after %v)", err, pr.URL, d)
	} else if pr.Log {
		logf("postPingResult complete to %v (after %v)", pr.URL, d)
	}
	return nil
}

var (
	metricMapRequestsActive = clientmetric.NewGauge("controlclient_map_requests_active")

	metricMapRequests     = clientmetric.NewCounter("controlclient_map_requests")
	metricMapRequestsLite = clientmetric.NewCounter("controlclient_map_requests_lite")
	metricMapRequestsPoll = clientmetric.NewCounter("controlclient_map_requests_poll")

	metricMapResponseMessages   = clientmetric.NewCounter("controlclient_map_response_message") // any message type
	metricMapResponsePings      = clientmetric.NewCounter("controlclient_map_response_ping")
	metricMapResponseKeepAlives = clientmetric.NewCounter("controlclient_map_response_keepalive")
	metricMapResponseMap        = clientmetric.NewCounter("controlclient_map_response_map")       // any non-keepalive map response
	metricMapResponseMapDelta   = clientmetric.NewCounter("controlclient_map_response_map_delta") // 2nd+ non-keepalive map response

	metricSetDNS      = clientmetric.NewCounter("controlclient_setdns")
	metricSetDNSError = clientmetric.NewCounter("controlclient_setdns_error")
)