derp/derpserver: split derp.Server out of derp into its own package

This exports a number of things from the derp (generic + client) package
to be used by the new derpserver package, as now used by cmd/derper.

Also included are enough other miscellaneous changes to lock in that
cmd/tailscaled can be configured to not bring in tailscale.com/client/local.
(The webclient in particular was bringing it in even when disabled; that's now fixed.)

Fixes #17257

Change-Id: I88b6c7958643fb54f386dd900bddf73d2d4d96d5
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Authored by Brad Fitzpatrick on 2025-09-23 17:07:48 -07:00; committed by Brad Fitzpatrick
parent df747f1c1b, commit 21dc5f4e21
35 changed files with 1442 additions and 1319 deletions
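
For orientation, a minimal sketch of how a DERP server wires things up after this split, assuming derpserver keeps the NewServer/Handler/ProbeHandler surface that the updated tests below exercise (the listen address here is made up):

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// Server-side DERP state now lives in derpserver instead of derp.
	s := derpserver.NewServer(key.NewNode(), log.Printf)
	defer s.Close()

	mux := http.NewServeMux()
	// Handler upgrades the HTTP connection and hands it to the DERP server.
	mux.Handle("/derp", derpserver.Handler(s))
	// Latency probe endpoint for clients without UDP, assumed to move along
	// with the rest of the former derphttp server code.
	mux.HandleFunc("/derp/probe", derpserver.ProbeHandler)
	log.Fatal(http.ListenAndServe(":3340", mux))
}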


@@ -522,7 +522,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien
// just to get routed into the server's HTTP Handler so it
// can Hijack the request, but we signal with a special header
// that we don't want to deal with its HTTP response.
req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response
req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response
if err := req.Write(brw); err != nil {
return nil, 0, err
}


@@ -1,115 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
import (
"fmt"
"log"
"net/http"
"strings"
"tailscale.com/derp"
)
// fastStartHeader is the header (with value "1") that signals to the HTTP
// server that the DERP HTTP client does not want the HTTP 101 response
// headers and it will begin writing & reading the DERP protocol immediately
// following its HTTP request.
const fastStartHeader = "Derp-Fast-Start"
// Handler returns an http.Handler to be mounted at /derp, serving s.
func Handler(s *derp.Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// These are installed both here and in cmd/derper. The check here
// catches both cmd/derper run with DERP disabled (STUN only mode) as
// well as DERP being run in tests with derphttp.Handler directly,
// as netcheck still assumes this replies.
switch r.URL.Path {
case "/derp/probe", "/derp/latency-check":
ProbeHandler(w, r)
return
}
up := strings.ToLower(r.Header.Get("Upgrade"))
if up != "websocket" && up != "derp" {
if up != "" {
log.Printf("Weird upgrade: %q", up)
}
http.Error(w, "DERP requires connection upgrade", http.StatusUpgradeRequired)
return
}
fastStart := r.Header.Get(fastStartHeader) == "1"
h, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "HTTP does not support general TCP support", 500)
return
}
netConn, conn, err := h.Hijack()
if err != nil {
log.Printf("Hijack failed: %v", err)
http.Error(w, "HTTP does not support general TCP support", 500)
return
}
if !fastStart {
pubKey := s.PublicKey()
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
"Upgrade: DERP\r\n"+
"Connection: Upgrade\r\n"+
"Derp-Version: %v\r\n"+
"Derp-Public-Key: %s\r\n\r\n",
derp.ProtocolVersion,
pubKey.UntypedHexString())
}
if v := r.Header.Get(derp.IdealNodeHeader); v != "" {
ctx = derp.IdealNodeContextKey.WithValue(ctx, v)
}
s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String())
})
}
// ProbeHandler is the endpoint that clients without UDP access (including js/wasm) hit to measure
// DERP latency, as a replacement for UDP STUN queries.
func ProbeHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "HEAD", "GET":
w.Header().Set("Access-Control-Allow-Origin", "*")
default:
http.Error(w, "bogus probe method", http.StatusMethodNotAllowed)
}
}
// ServeNoContent generates the /generate_204 response used by Tailscale's
// captive portal detection.
func ServeNoContent(w http.ResponseWriter, r *http.Request) {
if challenge := r.Header.Get(NoContentChallengeHeader); challenge != "" {
badChar := strings.IndexFunc(challenge, func(r rune) bool {
return !isChallengeChar(r)
}) != -1
if len(challenge) <= 64 && !badChar {
w.Header().Set(NoContentResponseHeader, "response "+challenge)
}
}
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
w.WriteHeader(http.StatusNoContent)
}
func isChallengeChar(c rune) bool {
// Semi-randomly chosen as a limited set of valid characters
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '.' || c == '-' || c == '_' || c == ':'
}
const (
NoContentChallengeHeader = "X-Tailscale-Challenge"
NoContentResponseHeader = "X-Tailscale-Response"
)
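
For the ServeNoContent captive-portal check above, a small sketch of the challenge/response round trip, assuming the function and the two header constants move to derpserver along with the rest of this deleted file:

package derpexample

import (
	"net/http/httptest"

	"tailscale.com/derp/derpserver" // assumed new home of ServeNoContent
)

func exampleNoContentChallenge() (status int, echoed string) {
	req := httptest.NewRequest("GET", "/generate_204", nil)
	// Challenges are limited to 64 bytes of [a-zA-Z0-9._:-].
	req.Header.Set(derpserver.NoContentChallengeHeader, "probe-123")
	rec := httptest.NewRecorder()
	derpserver.ServeNoContent(rec, req)
	// Expect a 204 and the challenge echoed back with a "response " prefix.
	return rec.Code, rec.Header().Get(derpserver.NoContentResponseHeader)
}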


@@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
package derphttp_test
import (
"bytes"
@@ -21,9 +21,12 @@ import (
"time"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/net/netx"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
"tailscale.com/types/key"
)
@@ -41,12 +44,12 @@ func TestSendRecv(t *testing.T) {
clientKeys = append(clientKeys, priv.Public())
}
s := derp.NewServer(serverPrivateKey, t.Logf)
s := derpserver.NewServer(serverPrivateKey, t.Logf)
defer s.Close()
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@@ -65,7 +68,7 @@ func TestSendRecv(t *testing.T) {
}
}()
var clients []*Client
var clients []*derphttp.Client
var recvChs []chan []byte
done := make(chan struct{})
var wg sync.WaitGroup
@@ -78,7 +81,7 @@ func TestSendRecv(t *testing.T) {
}()
for i := range numClients {
key := clientPrivateKeys[i]
c, err := NewClient(key, serverURL, t.Logf, netMon)
c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon)
if err != nil {
t.Fatalf("client %d: %v", i, err)
}
@@ -158,7 +161,7 @@ func TestSendRecv(t *testing.T) {
recvNothing(1)
}
func waitConnect(t testing.TB, c *Client) {
func waitConnect(t testing.TB, c *derphttp.Client) {
t.Helper()
if m, err := c.Recv(); err != nil {
t.Fatalf("client first Recv: %v", err)
@@ -169,12 +172,12 @@ func waitConnect(t testing.TB, c *Client) {
func TestPing(t *testing.T) {
serverPrivateKey := key.NewNode()
s := derp.NewServer(serverPrivateKey, t.Logf)
s := derpserver.NewServer(serverPrivateKey, t.Logf)
defer s.Close()
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@@ -193,7 +196,7 @@ func TestPing(t *testing.T) {
}
}()
c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic())
c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic())
if err != nil {
t.Fatalf("NewClient: %v", err)
}
@@ -221,11 +224,11 @@ func TestPing(t *testing.T) {
const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) {
s = derp.NewServer(k, t.Logf)
func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) {
s = derpserver.NewServer(k, t.Logf)
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S
return
}
func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) {
c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic())
func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) {
c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic())
if err != nil {
t.Fatal(err)
}
@@ -260,30 +263,16 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW
return
}
// breakConnection breaks the connection, which should trigger a reconnect.
func (c *Client) breakConnection(brokenClient *derp.Client) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != brokenClient {
return
}
if c.netConn != nil {
c.netConn.Close()
c.netConn = nil
}
c.client = nil
}
// Test that a watcher connection successfully reconnects and processes peer
// updates after a different thread breaks and reconnects the connection, while
// the watcher is waiting on recv().
func TestBreakWatcherConnRecv(t *testing.T) {
// TODO(bradfitz): use synctest + memnet instead
// Set the wait time before a retry after connection failure to be much lower.
// This needs to be early in the test, for defer to run right at the end after
// the DERP client has finished.
origRetryInterval := retryInterval
retryInterval = 50 * time.Millisecond
defer func() { retryInterval = origRetryInterval }()
tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)
var wg sync.WaitGroup
// Make the watcher server
@@ -301,11 +290,11 @@ func TestBreakWatcherConnRecv(t *testing.T) {
defer watcher.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
watcherChan := make(chan int, 1)
defer close(watcherChan)
errChan := make(chan error, 1)
defer close(errChan)
// Start the watcher thread (which connects to the watched server)
wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343
@@ -320,7 +309,10 @@ func TestBreakWatcherConnRecv(t *testing.T) {
}
remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- }
notifyErr := func(err error) {
errChan <- err
select {
case errChan <- err:
case <-ctx.Done():
}
}
watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr)
@@ -345,7 +337,7 @@ func TestBreakWatcherConnRecv(t *testing.T) {
t.Fatalf("watcher did not process the peer update")
}
timer.Reset(5 * time.Second)
watcher.breakConnection(watcher.client)
watcher.BreakConnection(watcher)
// re-establish connection by sending a packet
watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))
}
@@ -357,12 +349,12 @@ func TestBreakWatcherConnRecv(t *testing.T) {
// updates after a different thread breaks and reconnects the connection, while
// the watcher is not waiting on recv().
func TestBreakWatcherConn(t *testing.T) {
// TODO(bradfitz): use synctest + memnet instead
// Set the wait time before a retry after connection failure to be much lower.
// This needs to be early in the test, for defer to run right at the end after
// the DERP client has finished.
origRetryInterval := retryInterval
retryInterval = 50 * time.Millisecond
defer func() { retryInterval = origRetryInterval }()
tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)
var wg sync.WaitGroup
// Make the watcher server
@@ -428,7 +420,7 @@ func TestBreakWatcherConn(t *testing.T) {
case <-timer.C:
t.Fatalf("watcher did not process the peer update")
}
watcher1.breakConnection(watcher1.client)
watcher1.BreakConnection(watcher1)
// re-establish connection by sending a packet
watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))
// signal that the breaker is done
@@ -446,7 +438,7 @@ func noopRemove(derp.PeerGoneMessage) {}
func noopNotifyError(error) {}
func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
defer func() { testHookWatchLookConnectResult = nil }()
defer derphttp.SetTestHookWatchLookConnectResult(nil)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@@ -461,7 +453,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
defer watcher.Close()
// Test connecting to ourselves, and that we get hung up on.
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err != nil {
t.Fatalf("error connecting to server: %v", err)
@@ -470,12 +462,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
t.Error("wanted self-connect; wasn't")
}
return false
}
})
watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError)
// Test connecting to the server with a zero value for ignoreServerKey,
// so we should always connect.
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err != nil {
t.Fatalf("error connecting to server: %v", err)
@@ -484,16 +476,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
t.Error("wanted normal connect; got self connect")
}
return false
}
})
watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError)
}
// verify that the LocalAddr method doesn't acquire the mutex.
// See https://github.com/tailscale/tailscale/issues/11519
func TestLocalAddrNoMutex(t *testing.T) {
var c Client
c.mu.Lock()
defer c.mu.Unlock() // not needed in test but for symmetry
var c derphttp.Client
_, err := c.LocalAddr()
if got, want := fmt.Sprint(err), "client not connected"; got != want {
@@ -502,7 +492,7 @@ func TestLocalAddrNoMutex(t *testing.T) {
}
func TestProbe(t *testing.T) {
h := Handler(nil)
h := derpserver.Handler(nil)
tests := []struct {
path string
@@ -523,7 +513,7 @@ func TestProbe(t *testing.T) {
}
func TestNotifyError(t *testing.T) {
defer func() { testHookWatchLookConnectResult = nil }()
defer derphttp.SetTestHookWatchLookConnectResult(nil)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
@@ -541,7 +531,7 @@ func TestNotifyError(t *testing.T) {
}))
defer watcher.Close()
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err == nil {
t.Fatal("expected error connecting to server, got nil")
@@ -550,7 +540,7 @@ func TestNotifyError(t *testing.T) {
t.Error("wanted normal connect; got self connect")
}
return false
}
})
errChan := make(chan error, 1)
notifyError := func(err error) {
@@ -587,7 +577,7 @@ func TestManualDial(t *testing.T) {
region := slices.Sorted(maps.Keys(dm.Regions))[0]
netMon := netmon.NewStatic()
rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion {
rc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion {
return dm.Regions[region]
})
defer rc.Close()
@@ -625,7 +615,7 @@ func TestURLDial(t *testing.T) {
}
}
netMon := netmon.NewStatic()
c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon)
c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon)
defer c.Close()
if err := c.Connect(context.Background()); err != nil {


@@ -0,0 +1,24 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) {
testHookWatchLookConnectResult = f
}
// BreakConnection breaks the connection, which should trigger a reconnect.
func (c *Client) BreakConnection(brokenClient *Client) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != brokenClient.client {
return
}
if c.netConn != nil {
c.netConn.Close()
c.netConn = nil
}
c.client = nil
}
var RetryInterval = &retryInterval
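
These exports exist because the test file above now lives in the external derphttp_test package and can only reach exported API. A sketch of how the updated tests use them (the test name here is made up):

package derphttp_test

import (
	"testing"
	"time"

	"tailscale.com/derp/derphttp"
	"tailscale.com/tstest"
)

func TestReconnectQuickly(t *testing.T) {
	// Shrink the reconnect delay for the duration of the test;
	// tstest.Replace restores the original value when the test ends.
	tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)

	// Install the watch-connect hook and clear it on exit, as the
	// watcher tests above do.
	derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
		return false // stop the watch loop after the first connect attempt
	})
	defer derphttp.SetTestHookWatchLookConnectResult(nil)
}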