2020-02-05 22:16:58 +00:00
|
|
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package ipnserver
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bufio"
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"net"
|
2020-03-26 05:57:46 +00:00
|
|
|
"net/http"
|
2020-02-05 22:16:58 +00:00
|
|
|
"os"
|
|
|
|
"os/exec"
|
|
|
|
"os/signal"
|
|
|
|
"sync"
|
|
|
|
"syscall"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"tailscale.com/control/controlclient"
|
|
|
|
"tailscale.com/ipn"
|
|
|
|
"tailscale.com/logtail/backoff"
|
|
|
|
"tailscale.com/safesocket"
|
2020-07-02 18:26:33 +00:00
|
|
|
"tailscale.com/smallzstd"
|
2020-02-15 03:23:16 +00:00
|
|
|
"tailscale.com/types/logger"
|
2020-02-16 02:14:50 +00:00
|
|
|
"tailscale.com/version"
|
2020-02-05 22:16:58 +00:00
|
|
|
"tailscale.com/wgengine"
|
|
|
|
)
|
|
|
|
|
2020-02-16 02:14:50 +00:00
|
|
|
// Options is the configuration of the Tailscale node agent.
type Options struct {
	// SocketPath, on unix systems, is the unix socket path to listen
	// on for frontend connections.
	SocketPath string

	// Port, on windows, is the localhost TCP port to listen on for
	// frontend connections.
	Port int

	// StatePath is the path to the stored agent state.
	StatePath string

	// AutostartStateKey, if non-empty, immediately starts the agent
	// using the given StateKey. If empty, the agent stays idle and
	// waits for a frontend to start it.
	AutostartStateKey ipn.StateKey

	// LegacyConfigPath optionally specifies the old-style relaynode
	// relay.conf location. If both LegacyConfigPath and
	// AutostartStateKey are specified and the requested state doesn't
	// exist in the backend store, the backend migrates the config
	// from LegacyConfigPath.
	//
	// TODO(danderson): remove some time after the transition to
	// tailscaled is done.
	LegacyConfigPath string

	// SurviveDisconnects specifies how the server reacts to its
	// frontend disconnecting. If true, the server keeps running on
	// its existing state, and accepts new frontend connections. If
	// false, the server dumps its state and becomes idle.
	//
	// To support CLI connections (notably, "tailscale status"),
	// the actual definition of "disconnect" is when the
	// connection count transitions from 1 to 0.
	SurviveDisconnects bool

	// DebugMux, if non-nil, specifies an HTTP ServeMux in which
	// to register a debug handler.
	DebugMux *http.ServeMux
}
|
|
|
|
|
2020-07-15 19:23:36 +00:00
|
|
|
// server is an IPN backend and its set of 0 or more active connections
// talking to an IPN backend.
type server struct {
	resetOnZero bool // call bs.Reset on transition from 1->0 connections

	bsMu sync.Mutex // lock order: bsMu, then mu
	bs   *ipn.BackendServer

	mu      sync.Mutex // guards clients
	clients map[net.Conn]bool
}
|
|
|
|
|
|
|
|
func (s *server) serveConn(ctx context.Context, c net.Conn, logf logger.Logf) {
|
|
|
|
s.addConn(c)
|
|
|
|
logf("incoming control connection")
|
|
|
|
defer s.removeAndCloseConn(c)
|
|
|
|
for ctx.Err() == nil {
|
|
|
|
msg, err := ipn.ReadMsg(c)
|
2020-02-05 22:16:58 +00:00
|
|
|
if err != nil {
|
2020-07-15 19:23:36 +00:00
|
|
|
if ctx.Err() == nil {
|
|
|
|
logf("ReadMsg: %v", err)
|
|
|
|
}
|
|
|
|
return
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
s.bsMu.Lock()
|
|
|
|
if err := s.bs.GotCommandMsg(msg); err != nil {
|
2020-04-11 15:35:34 +00:00
|
|
|
logf("GotCommandMsg: %v", err)
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
gotQuit := s.bs.GotQuit
|
|
|
|
s.bsMu.Unlock()
|
|
|
|
if gotQuit {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *server) addConn(c net.Conn) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
if s.clients == nil {
|
|
|
|
s.clients = map[net.Conn]bool{}
|
|
|
|
}
|
|
|
|
s.clients[c] = true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *server) removeAndCloseConn(c net.Conn) {
|
|
|
|
s.mu.Lock()
|
|
|
|
delete(s.clients, c)
|
|
|
|
remain := len(s.clients)
|
|
|
|
s.mu.Unlock()
|
|
|
|
|
|
|
|
if remain == 0 && s.resetOnZero {
|
|
|
|
s.bsMu.Lock()
|
|
|
|
s.bs.Reset()
|
|
|
|
s.bsMu.Unlock()
|
|
|
|
}
|
|
|
|
c.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *server) stopAll() {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
for c := range s.clients {
|
|
|
|
safesocket.ConnCloseRead(c)
|
|
|
|
safesocket.ConnCloseWrite(c)
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
s.clients = nil
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
|
|
|
|
2020-07-15 19:23:36 +00:00
|
|
|
func (s *server) writeToClients(b []byte) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
for c := range s.clients {
|
|
|
|
ipn.WriteMsg(c, b)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-29 20:38:09 +00:00
|
|
|
// Run runs a Tailscale backend service.
|
|
|
|
// The getEngine func is called repeatedly, once per connection, until it returns an engine successfully.
|
|
|
|
func Run(ctx context.Context, logf logger.Logf, logid string, getEngine func() (wgengine.Engine, error), opts Options) error {
|
2020-07-08 21:15:33 +00:00
|
|
|
runDone := make(chan struct{})
|
|
|
|
defer close(runDone)
|
2020-02-05 22:16:58 +00:00
|
|
|
|
2020-02-18 20:33:28 +00:00
|
|
|
listen, _, err := safesocket.Listen(opts.SocketPath, uint16(opts.Port))
|
2020-02-05 22:16:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("safesocket.Listen: %v", err)
|
|
|
|
}
|
2020-02-25 15:36:32 +00:00
|
|
|
|
2020-07-15 19:23:36 +00:00
|
|
|
server := &server{
|
|
|
|
resetOnZero: !opts.SurviveDisconnects,
|
|
|
|
}
|
|
|
|
|
|
|
|
// When the context is closed or when we return, whichever is first, close our listner
|
|
|
|
// and all open connections.
|
2020-02-16 02:14:50 +00:00
|
|
|
go func() {
|
2020-02-25 15:36:32 +00:00
|
|
|
select {
|
2020-07-15 19:23:36 +00:00
|
|
|
case <-ctx.Done():
|
2020-02-25 15:36:32 +00:00
|
|
|
case <-runDone:
|
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
server.stopAll()
|
2020-02-16 02:14:50 +00:00
|
|
|
listen.Close()
|
|
|
|
}()
|
2020-04-11 15:35:34 +00:00
|
|
|
logf("Listening on %v", listen.Addr())
|
2020-02-05 22:16:58 +00:00
|
|
|
|
2020-07-08 21:15:33 +00:00
|
|
|
bo := backoff.NewBackoff("ipnserver", logf)
|
|
|
|
|
2020-07-29 20:38:09 +00:00
|
|
|
eng, err := getEngine()
|
|
|
|
if err != nil {
|
|
|
|
logf("Initial getEngine call: %v", err)
|
2020-07-15 19:23:36 +00:00
|
|
|
for i := 1; ctx.Err() == nil; i++ {
|
2020-07-08 21:15:33 +00:00
|
|
|
s, err := listen.Accept()
|
|
|
|
if err != nil {
|
|
|
|
logf("%d: Accept: %v", i, err)
|
2020-07-15 19:23:36 +00:00
|
|
|
bo.BackOff(ctx, err)
|
2020-07-08 21:15:33 +00:00
|
|
|
continue
|
|
|
|
}
|
2020-07-29 20:38:09 +00:00
|
|
|
logf("%d: trying getEngine again...", i)
|
|
|
|
eng, err = getEngine()
|
|
|
|
if err == nil {
|
|
|
|
logf("%d: GetEngine worked; exiting failure loop", i)
|
|
|
|
break
|
2020-07-08 21:15:33 +00:00
|
|
|
}
|
2020-07-29 20:38:09 +00:00
|
|
|
logf("%d: getEngine failed again: %v", i, err)
|
|
|
|
errMsg := err.Error()
|
2020-07-08 21:15:33 +00:00
|
|
|
go func() {
|
|
|
|
defer s.Close()
|
2020-07-29 20:38:09 +00:00
|
|
|
serverToClient := func(b []byte) { ipn.WriteMsg(s, b) }
|
2020-07-08 21:15:33 +00:00
|
|
|
bs := ipn.NewBackendServer(logf, nil, serverToClient)
|
2020-07-29 20:38:09 +00:00
|
|
|
bs.SendErrorMessage(errMsg)
|
2020-07-08 21:15:33 +00:00
|
|
|
s.Read(make([]byte, 1))
|
|
|
|
}()
|
|
|
|
}
|
2020-07-29 22:15:05 +00:00
|
|
|
if err := ctx.Err(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-07-08 21:15:33 +00:00
|
|
|
}
|
|
|
|
|
2020-02-03 18:35:52 +00:00
|
|
|
var store ipn.StateStore
|
|
|
|
if opts.StatePath != "" {
|
|
|
|
store, err = ipn.NewFileStore(opts.StatePath)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("ipn.NewFileStore(%q): %v", opts.StatePath, err)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
store = &ipn.MemoryStore{}
|
|
|
|
}
|
|
|
|
|
2020-07-29 20:38:09 +00:00
|
|
|
b, err := ipn.NewLocalBackend(logf, logid, store, eng)
|
2020-02-05 22:16:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("NewLocalBackend: %v", err)
|
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
defer b.Shutdown()
|
2020-02-05 22:16:58 +00:00
|
|
|
b.SetDecompressor(func() (controlclient.Decompressor, error) {
|
2020-07-02 18:26:33 +00:00
|
|
|
return smallzstd.NewDecoder(nil)
|
2020-02-05 22:16:58 +00:00
|
|
|
})
|
|
|
|
|
2020-03-26 05:57:46 +00:00
|
|
|
if opts.DebugMux != nil {
|
|
|
|
opts.DebugMux.HandleFunc("/debug/ipn", func(w http.ResponseWriter, r *http.Request) {
|
2020-03-27 20:26:35 +00:00
|
|
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
|
|
|
st := b.Status()
|
|
|
|
// TODO(bradfitz): add LogID and opts to st?
|
|
|
|
st.WriteHTML(w)
|
2020-03-26 05:57:46 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-07-15 19:23:36 +00:00
|
|
|
server.bs = ipn.NewBackendServer(logf, b, server.writeToClients)
|
2020-02-05 22:16:58 +00:00
|
|
|
|
2020-02-16 02:14:50 +00:00
|
|
|
if opts.AutostartStateKey != "" {
|
2020-07-15 19:23:36 +00:00
|
|
|
server.bs.GotCommand(&ipn.Command{
|
2020-02-16 02:14:50 +00:00
|
|
|
Version: version.LONG,
|
|
|
|
Start: &ipn.StartArgs{
|
|
|
|
Opts: ipn.Options{
|
2020-02-20 07:23:34 +00:00
|
|
|
StateKey: opts.AutostartStateKey,
|
|
|
|
LegacyConfigPath: opts.LegacyConfigPath,
|
2020-02-16 02:14:50 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
2020-02-05 22:16:58 +00:00
|
|
|
|
2020-07-15 19:23:36 +00:00
|
|
|
for i := 1; ctx.Err() == nil; i++ {
|
|
|
|
c, err := listen.Accept()
|
2020-02-05 22:16:58 +00:00
|
|
|
if err != nil {
|
2020-07-15 19:23:36 +00:00
|
|
|
if ctx.Err() == nil {
|
|
|
|
logf("ipnserver: Accept: %v", err)
|
|
|
|
bo.BackOff(ctx, err)
|
|
|
|
}
|
2020-02-05 22:16:58 +00:00
|
|
|
continue
|
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
go server.serveConn(ctx, c, logger.WithPrefix(logf, fmt.Sprintf("ipnserver: conn%d: ", i)))
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
2020-07-15 19:23:36 +00:00
|
|
|
return ctx.Err()
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func BabysitProc(ctx context.Context, args []string, logf logger.Logf) {
|
|
|
|
|
|
|
|
executable, err := os.Executable()
|
|
|
|
if err != nil {
|
|
|
|
panic("cannot determine executable: " + err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
var proc struct {
|
|
|
|
mu sync.Mutex
|
|
|
|
p *os.Process
|
|
|
|
}
|
|
|
|
|
|
|
|
done := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
interrupt := make(chan os.Signal, 1)
|
|
|
|
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
|
|
|
var sig os.Signal
|
|
|
|
select {
|
|
|
|
case sig = <-interrupt:
|
2020-04-11 15:35:34 +00:00
|
|
|
logf("BabysitProc: got signal: %v", sig)
|
2020-02-05 22:16:58 +00:00
|
|
|
close(done)
|
|
|
|
case <-ctx.Done():
|
2020-04-11 15:35:34 +00:00
|
|
|
logf("BabysitProc: context done")
|
2020-02-05 22:16:58 +00:00
|
|
|
sig = os.Kill
|
|
|
|
close(done)
|
|
|
|
}
|
|
|
|
|
|
|
|
proc.mu.Lock()
|
|
|
|
proc.p.Signal(sig)
|
|
|
|
proc.mu.Unlock()
|
|
|
|
}()
|
|
|
|
|
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 02:59:54 +00:00
|
|
|
bo := backoff.NewBackoff("BabysitProc", logf)
|
2020-02-05 22:16:58 +00:00
|
|
|
|
|
|
|
for {
|
|
|
|
startTime := time.Now()
|
2020-04-11 15:35:34 +00:00
|
|
|
log.Printf("exec: %#v %v", executable, args)
|
2020-02-05 22:16:58 +00:00
|
|
|
cmd := exec.Command(executable, args...)
|
|
|
|
|
|
|
|
// Create a pipe object to use as the subproc's stdin.
|
|
|
|
// When the writer goes away, the reader gets EOF.
|
|
|
|
// A subproc can watch its stdin and exit when it gets EOF;
|
|
|
|
// this is a very reliable way to have a subproc die when
|
|
|
|
// its parent (us) disappears.
|
|
|
|
// We never need to actually write to wStdin.
|
|
|
|
rStdin, wStdin, err := os.Pipe()
|
|
|
|
if err != nil {
|
2020-04-11 15:35:34 +00:00
|
|
|
log.Printf("os.Pipe 1: %v", err)
|
2020-02-05 22:16:58 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a pipe object to use as the subproc's stdout/stderr.
|
|
|
|
// We'll read from this pipe and send it to logf, line by line.
|
|
|
|
// We can't use os.exec's io.Writer for this because it
|
|
|
|
// doesn't care about lines, and thus ends up merging multiple
|
|
|
|
// log lines into one or splitting one line into multiple
|
|
|
|
// logf() calls. bufio is more appropriate.
|
|
|
|
rStdout, wStdout, err := os.Pipe()
|
|
|
|
if err != nil {
|
2020-04-11 15:35:34 +00:00
|
|
|
log.Printf("os.Pipe 2: %v", err)
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
|
|
|
go func(r *os.File) {
|
|
|
|
defer r.Close()
|
|
|
|
rb := bufio.NewReader(r)
|
|
|
|
for {
|
|
|
|
s, err := rb.ReadString('\n')
|
|
|
|
if s != "" {
|
2020-04-11 15:35:34 +00:00
|
|
|
logf("%s", s)
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}(rStdout)
|
|
|
|
|
|
|
|
cmd.Stdin = rStdin
|
|
|
|
cmd.Stdout = wStdout
|
|
|
|
cmd.Stderr = wStdout
|
|
|
|
err = cmd.Start()
|
|
|
|
|
|
|
|
// Now that the subproc is started, get rid of our copy of the
|
|
|
|
// pipe reader. Bad things happen on Windows if more than one
|
|
|
|
// process owns the read side of a pipe.
|
|
|
|
rStdin.Close()
|
|
|
|
wStdout.Close()
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("starting subprocess failed: %v", err)
|
|
|
|
} else {
|
|
|
|
proc.mu.Lock()
|
|
|
|
proc.p = cmd.Process
|
|
|
|
proc.mu.Unlock()
|
|
|
|
|
|
|
|
err = cmd.Wait()
|
|
|
|
log.Printf("subprocess exited: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the process finishes, clean up the write side of the
|
|
|
|
// pipe. We'll make a new one when we restart the subproc.
|
|
|
|
wStdin.Close()
|
|
|
|
|
|
|
|
if time.Since(startTime) < 60*time.Second {
|
|
|
|
bo.BackOff(ctx, fmt.Errorf("subproc early exit: %v", err))
|
|
|
|
} else {
|
|
|
|
// Reset the timeout, since the process ran for a while.
|
|
|
|
bo.BackOff(ctx, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-07-29 20:38:09 +00:00
|
|
|
|
|
|
|
// FixedEngine returns a func that returns eng and a nil error.
|
|
|
|
func FixedEngine(eng wgengine.Engine) func() (wgengine.Engine, error) {
|
|
|
|
return func() (wgengine.Engine, error) { return eng, nil }
|
|
|
|
}
|