// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package derphttp

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"sync"
	"testing"
	"time"

	"tailscale.com/derp"
	"tailscale.com/net/netmon"
	"tailscale.com/tstest/deptest"
	"tailscale.com/types/key"
	"tailscale.com/util/set"
)
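
// TestSendRecv starts a DERP server over plain HTTP, connects three clients,
// and verifies that a packet sent from one client is received only by the
// addressed client.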
func TestSendRecv(t *testing.T) {
	serverPrivateKey := key.NewNode()

	netMon := netmon.NewStatic()

	const numClients = 3
	var clientPrivateKeys []key.NodePrivate
	var clientKeys []key.NodePublic
	for range numClients {
		priv := key.NewNode()
		clientPrivateKeys = append(clientPrivateKeys, priv)
		clientKeys = append(clientKeys, priv.Public())
	}

	s := derp.NewServer(serverPrivateKey, t.Logf)
	defer s.Close()

	httpsrv := &http.Server{
		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
		Handler:      Handler(s),
	}

	ln, err := net.Listen("tcp4", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	serverURL := "http://" + ln.Addr().String()
	t.Logf("server URL: %s", serverURL)

	go func() {
		if err := httpsrv.Serve(ln); err != nil {
			if err == http.ErrServerClosed {
				return
			}
			panic(err)
		}
	}()

	var clients []*Client
	var recvChs []chan []byte
	done := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		close(done)
		for _, c := range clients {
			c.Close()
		}
		wg.Wait()
	}()
	for i := range numClients {
		key := clientPrivateKeys[i]
		c, err := NewClient(key, serverURL, t.Logf, netMon)
		if err != nil {
			t.Fatalf("client %d: %v", i, err)
		}
		if err := c.Connect(context.Background()); err != nil {
			t.Fatalf("client %d Connect: %v", i, err)
		}
		waitConnect(t, c)
		clients = append(clients, c)
		recvChs = append(recvChs, make(chan []byte))
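
		// Each client gets a reader goroutine that forwards received packets
		// to recvChs[i] until the test closes done.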
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for {
				select {
				case <-done:
					return
				default:
				}
				m, err := c.Recv()
				if err != nil {
					select {
					case <-done:
						return
					default:
					}
					t.Logf("client%d: %v", i, err)
					break
				}
				switch m := m.(type) {
				default:
					t.Errorf("unexpected message type %T", m)
					continue
				case derp.PeerGoneMessage:
					// Ignore.
				case derp.ReceivedPacket:
					recvChs[i] <- bytes.Clone(m.Data)
				}
			}
		}(i)
	}
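
	// recv asserts that client i receives want within five seconds;
	// recvNothing asserts that client i has nothing pending.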
	recv := func(i int, want string) {
		t.Helper()
		select {
		case b := <-recvChs[i]:
			if got := string(b); got != want {
				t.Errorf("client%d.Recv=%q, want %q", i, got, want)
			}
		case <-time.After(5 * time.Second):
			t.Errorf("client%d.Recv, got nothing, want %q", i, want)
		}
	}
	recvNothing := func(i int) {
		t.Helper()
		select {
		case b := <-recvChs[i]:
			t.Errorf("client%d.Recv=%q, want nothing", i, string(b))
		default:
		}
	}

	msg1 := []byte("hello 0->1\n")
	if err := clients[0].Send(clientKeys[1], msg1); err != nil {
		t.Fatal(err)
	}
	recv(1, string(msg1))
	recvNothing(0)
	recvNothing(2)

	msg2 := []byte("hello 1->2\n")
	if err := clients[1].Send(clientKeys[2], msg2); err != nil {
		t.Fatal(err)
	}
	recv(2, string(msg2))
	recvNothing(0)
	recvNothing(1)
}
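
// waitConnect reads the first message from c and fails the test unless it is
// the initial derp.ServerInfoMessage.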
func waitConnect(t testing.TB, c *Client) {
	t.Helper()
	if m, err := c.Recv(); err != nil {
		t.Fatalf("client first Recv: %v", err)
	} else if v, ok := m.(derp.ServerInfoMessage); !ok {
		t.Fatalf("client first Recv was unexpected type %T", v)
	}
}
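
// TestPing connects a single client to a DERP server and verifies that
// Ping completes without error.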
func TestPing(t *testing.T) {
	serverPrivateKey := key.NewNode()
	s := derp.NewServer(serverPrivateKey, t.Logf)
	defer s.Close()

	httpsrv := &http.Server{
		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
		Handler:      Handler(s),
	}

	ln, err := net.Listen("tcp4", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	serverURL := "http://" + ln.Addr().String()
	t.Logf("server URL: %s", serverURL)

	go func() {
		if err := httpsrv.Serve(ln); err != nil {
			if err == http.ErrServerClosed {
				return
			}
			panic(err)
		}
	}()

	c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic())
	if err != nil {
		t.Fatalf("NewClient: %v", err)
	}
	defer c.Close()
	if err := c.Connect(context.Background()); err != nil {
		t.Fatalf("client Connect: %v", err)
	}

	errc := make(chan error, 1)
	go func() {
		for {
			m, err := c.Recv()
			if err != nil {
				errc <- err
				return
			}
			t.Logf("Recv: %T", m)
		}
	}()
	err = c.Ping(context.Background())
	if err != nil {
		t.Fatalf("Ping: %v", err)
	}
}
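
// newTestServer starts a DERP server with mesh key "1234" on a local HTTP
// listener and returns its URL and the server.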
func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) {
	s = derp.NewServer(k, t.Logf)
	httpsrv := &http.Server{
		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
		Handler:      Handler(s),
	}

	ln, err := net.Listen("tcp4", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	serverURL = "http://" + ln.Addr().String()
	s.SetMeshKey("1234")

	go func() {
		if err := httpsrv.Serve(ln); err != nil {
			if err == http.ErrServerClosed {
				t.Logf("server closed")
				return
			}
			panic(err)
		}
	}()
	return
}
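
// newWatcherClient returns a client for serverToWatchURL using
// watcherPrivateKey, configured with the same mesh key as newTestServer.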
func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) {
	c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic())
	if err != nil {
		t.Fatal(err)
	}
	c.MeshKey = "1234"
	return
}
// breakConnection breaks the connection, which should trigger a reconnect.
func (c *Client) breakConnection(brokenClient *derp.Client) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.client != brokenClient {
		return
	}
	if c.netConn != nil {
		c.netConn.Close()
		c.netConn = nil
	}
	c.client = nil
}
// Test that a watcher connection successfully reconnects and processes peer
// updates after a different thread breaks and reconnects the connection, while
// the watcher is waiting on recv().
func TestBreakWatcherConnRecv(t *testing.T) {
	// Set the wait time before a retry after connection failure to be much lower.
	// This needs to be early in the test, for defer to run right at the end after
	// the DERP client has finished.
	origRetryInterval := retryInterval
	retryInterval = 50 * time.Millisecond
	defer func() { retryInterval = origRetryInterval }()

	var wg sync.WaitGroup
	defer wg.Wait()
	// Make the watcher server
	serverPrivateKey1 := key.NewNode()
	_, s1 := newTestServer(t, serverPrivateKey1)
	defer s1.Close()

	// Make the watched server
	serverPrivateKey2 := key.NewNode()
	serverURL2, s2 := newTestServer(t, serverPrivateKey2)
	defer s2.Close()

	// Make the watcher (but it is not connected yet)
	watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2)
	defer watcher1.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	watcherChan := make(chan int, 1)

	// Start the watcher thread (which connects to the watched server)
	wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343
	go func() {
		defer wg.Done()
		var peers int
		add := func(m derp.PeerPresentMessage) {
			t.Logf("add: %v", m.Key.ShortString())
			peers++
			// Signal that the watcher has run
			watcherChan <- peers
		}
		remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- }

		watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove)
	}()

	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()

	// Wait for the watcher to run, then break the connection and check if it
	// reconnected and received peer updates.
	for range 10 {
		select {
		case peers := <-watcherChan:
			if peers != 1 {
				t.Fatal("wrong number of peers added during watcher connection")
			}
		case <-timer.C:
			t.Fatalf("watcher did not process the peer update")
		}
		watcher1.breakConnection(watcher1.client)
		// re-establish connection by sending a packet
		watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))

		timer.Reset(5 * time.Second)
	}
}
// Test that a watcher connection successfully reconnects and processes peer
// updates after a different thread breaks and reconnects the connection, while
// the watcher is not waiting on recv().
func TestBreakWatcherConn(t *testing.T) {
	// Set the wait time before a retry after connection failure to be much lower.
	// This needs to be early in the test, for defer to run right at the end after
	// the DERP client has finished.
	origRetryInterval := retryInterval
	retryInterval = 50 * time.Millisecond
	defer func() { retryInterval = origRetryInterval }()

	var wg sync.WaitGroup
	defer wg.Wait()
	// Make the watcher server
	serverPrivateKey1 := key.NewNode()
	_, s1 := newTestServer(t, serverPrivateKey1)
	defer s1.Close()

	// Make the watched server
	serverPrivateKey2 := key.NewNode()
	serverURL2, s2 := newTestServer(t, serverPrivateKey2)
	defer s2.Close()

	// Make the watcher (but it is not connected yet)
	watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2)
	defer watcher1.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	watcherChan := make(chan int, 1)
	breakerChan := make(chan bool, 1)

	// Start the watcher thread (which connects to the watched server)
	wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343
	go func() {
		defer wg.Done()
		var peers int
		add := func(m derp.PeerPresentMessage) {
			t.Logf("add: %v", m.Key.ShortString())
			peers++
			// Signal that the watcher has run
			watcherChan <- peers
			// Wait for breaker to run
			<-breakerChan
		}
		remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- }

		watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove)
	}()

	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()

	// Wait for the watcher to run, then break the connection and check if it
	// reconnected and received peer updates.
	for range 10 {
		select {
		case peers := <-watcherChan:
			if peers != 1 {
				t.Fatal("wrong number of peers added during watcher connection")
			}
		case <-timer.C:
			t.Fatalf("watcher did not process the peer update")
		}
		watcher1.breakConnection(watcher1.client)
		// re-establish connection by sending a packet
		watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))
		// signal that the breaker is done
		breakerChan <- true

		timer.Reset(5 * time.Second)
	}
}
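
// noopAdd and noopRemove are no-op peer-update callbacks for tests that
// don't care about peer events.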
func noopAdd(derp.PeerPresentMessage) {}
func noopRemove(derp.PeerGoneMessage) {}
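
// TestRunWatchConnectionLoopServeConnect verifies RunWatchConnectionLoop's
// self-connect handling: watching our own server key should get hung up on,
// while a zero ignoreServerKey should connect normally.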
func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
	defer func() { testHookWatchLookConnectResult = nil }()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	priv := key.NewNode()
	serverURL, s := newTestServer(t, priv)
	defer s.Close()

	pub := priv.Public()

	watcher := newWatcherClient(t, priv, serverURL)
	defer watcher.Close()

	// Test connecting to ourselves, and that we get hung up on.
	testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
		t.Helper()
		if err != nil {
			t.Fatalf("error connecting to server: %v", err)
		}
		if !wasSelfConnect {
			t.Error("wanted self-connect; wasn't")
		}
		return false
	}
	watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove)

	// Test connecting to the server with a zero value for ignoreServerKey,
	// so we should always connect.
	testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
		t.Helper()
		if err != nil {
			t.Fatalf("error connecting to server: %v", err)
		}
		if wasSelfConnect {
			t.Error("wanted normal connect; got self connect")
		}
		return false
	}
	watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove)
}
// TestLocalAddrNoMutex verifies that the LocalAddr method doesn't acquire the mutex.
// See https://github.com/tailscale/tailscale/issues/11519
func TestLocalAddrNoMutex(t *testing.T) {
	var c Client
	c.mu.Lock()
	defer c.mu.Unlock() // not needed in test but for symmetry

	_, err := c.LocalAddr()
	if got, want := fmt.Sprint(err), "client not connected"; got != want {
		t.Errorf("got error %q; want %q", got, want)
	}
}
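
// TestProbe checks the HTTP status codes returned by the DERP handler for
// its probe endpoints.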
func TestProbe(t *testing.T) {
	h := Handler(nil)

	tests := []struct {
		path string
		want int
	}{
		{"/derp/probe", 200},
		{"/derp/latency-check", 200},
		{"/derp/sdf", http.StatusUpgradeRequired},
	}

	for _, tt := range tests {
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, httptest.NewRequest("GET", tt.path, nil))
		if got := rec.Result().StatusCode; got != tt.want {
			t.Errorf("for path %q got HTTP status %v; want %v", tt.path, got, tt.want)
		}
	}
}
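
// TestDeps checks this package's dependency set: the websocket package should
// only be linked under the ts_debug_websockets build tag (or on js/wasm).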
func TestDeps(t *testing.T) {
	deptest.DepChecker{
		GOOS:   "darwin",
		GOARCH: "arm64",
		BadDeps: map[string]string{
			"github.com/coder/websocket": "shouldn't link websockets except on js/wasm",
		},
	}.Check(t)

	deptest.DepChecker{
		GOOS:   "darwin",
		GOARCH: "arm64",
		Tags:   "ts_debug_websockets",
		WantDeps: set.Of(
			"github.com/coder/websocket",
		),
	}.Check(t)
}