client, cmd/tailscale/cli, feature/relayserver, net/udprelay: implement tailscale debug peer-relay-sessions
Fixes tailscale/corp#30035

Signed-off-by: Dylan Bargatze <dylan@tailscale.com>
@@ -35,6 +35,7 @@ import (
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/net/netutil"
	"tailscale.com/net/udprelay/status"
	"tailscale.com/paths"
	"tailscale.com/safesocket"
	"tailscale.com/tailcfg"
@@ -1638,6 +1639,16 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error {
	return err
}

// DebugPeerRelaySessions returns debug information about the current peer
// relay sessions running through this node.
func (lc *Client) DebugPeerRelaySessions(ctx context.Context) (*status.ServerStatus, error) {
	body, err := lc.send(ctx, "GET", "/localapi/v0/debug-peer-relay-sessions", 200, nil)
	if err != nil {
		return nil, fmt.Errorf("error %w: %s", err, body)
	}
	return decodeJSON[*status.ServerStatus](body)
}

// StreamDebugCapture streams a pcap-formatted packet capture.
//
// The provided context does not determine the lifetime of the
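For orientation, here is a minimal, hypothetical sketch of calling the new client method from a Go program. It assumes a running tailscaled and that the zero value of local.Client dials the default socket, as with the package's other debug helpers; the snippet is illustrative, not part of this commit:

package main

import (
	"context"
	"fmt"
	"log"

	"tailscale.com/client/local"
)

func main() {
	var lc local.Client // zero value targets the default tailscaled socket
	st, err := lc.DebugPeerRelaySessions(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// ServerState has no String method in this diff, so print it numerically.
	fmt.Printf("state=%d port=%d sessions=%d\n", st.State, st.UDPPort, len(st.Sessions))
}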
@@ -122,6 +122,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/ipn+
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
tailscale.com/net/udprelay/status from tailscale.com/client/local
tailscale.com/net/wsconn from tailscale.com/cmd/derper
tailscale.com/paths from tailscale.com/client/local
💣 tailscale.com/safesocket from tailscale.com/client/local
@@ -877,6 +877,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/tsd+
tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock
tailscale.com/net/udprelay/status from tailscale.com/client/local
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/local+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
cmd/tailscale/cli/debug-peer-relay.go (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ios && !ts_omit_relayserver

package cli

import (
	"bytes"
	"cmp"
	"context"
	"fmt"
	"slices"

	"github.com/peterbourgon/ff/v3/ffcli"
	"tailscale.com/net/udprelay/status"
)

func init() {
	debugPeerRelayCmd = mkDebugPeerRelaySessionsCmd
}

func mkDebugPeerRelaySessionsCmd() *ffcli.Command {
	return &ffcli.Command{
		Name:       "peer-relay-sessions",
		ShortUsage: "tailscale debug peer-relay-sessions",
		Exec:       runPeerRelaySessions,
		ShortHelp:  "Print the current set of active peer relay sessions relayed through this node",
	}
}

func runPeerRelaySessions(ctx context.Context, args []string) error {
	srv, err := localClient.DebugPeerRelaySessions(ctx)
	if err != nil {
		return err
	}

	var buf bytes.Buffer
	f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) }

	validState := false
	f("Server status : ")
	switch srv.State {
	case status.Disabled:
		f("disabled (via node capability attribute 'disable-relay-server')")
	case status.ShutDown:
		f("shut down")
	case status.NotConfigured:
		f("not configured (you can configure the port with 'sudo tailscale set --relay-server-port=<PORT>')")
	case status.Uninitialized:
		validState = true
		f("listening on port %v", srv.UDPPort)
	case status.Running:
		validState = true
		f("running on port %v", srv.UDPPort)
	default:
		panic(fmt.Sprintf("unexpected status.ServerState: %#v", srv.State))
	}

	f("\n")
	if !validState {
		Stdout.Write(buf.Bytes())
		return nil
	}

	f("Active sessions: %d\n", len(srv.Sessions))
	if len(srv.Sessions) == 0 {
		Stdout.Write(buf.Bytes())
		return nil
	}

	slices.SortFunc(srv.Sessions, func(s1, s2 status.ServerSession) int { return cmp.Compare(s1.VNI, s2.VNI) })
	f("\n%-8s %-41s %-55s %-55s\n", "VNI", "Server", "Client 1", "Client 2")
	for _, s := range srv.Sessions {
		f("%-8d %-41s %-55s %-55s\n",
			s.VNI,
			s.Server.String(),
			s.Client1.String(),
			s.Client2.String(),
		)
	}

	Stdout.Write(buf.Bytes())
	return nil
}
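To make the table format concrete, here is an illustrative transcript of the new command against a relay with one active session; every address, port, VNI, disco string, and counter below is fabricated:

$ tailscale debug peer-relay-sessions
Server status : running on port 40000
Active sessions: 1

VNI      Server                                    Client 1                                                Client 2
7        203.0.113.40:40000[d:6b826e294f04479c]    192.0.2.10:41641[d:632dd8b18e83a42e] tx 42(6300B)       198.51.100.7:41641[d:f79d8c847b3985bd] tx 40(6000B)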
@@ -50,6 +50,7 @@ import (

var (
	debugCaptureCmd   func() *ffcli.Command // or nil
	debugPeerRelayCmd func() *ffcli.Command // or nil
)

func debugCmd() *ffcli.Command {
@@ -374,6 +375,7 @@ func debugCmd() *ffcli.Command {
				ShortHelp: "Print the current set of candidate peer relay servers",
				Exec:      runPeerRelayServers,
			},
			ccall(debugPeerRelayCmd),
		}...),
	}
}
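The ccall helper is defined elsewhere in this package; from its use here, it presumably invokes the command constructor when the feature is linked in and yields a nil command otherwise, so feature-gated subcommands drop out cleanly under build tags like ts_omit_relayserver. A sketch of that assumed shape (not part of this diff):

// Assumed shape of the existing ccall helper: call f if it is non-nil,
// otherwise return the zero value, so an omitted feature contributes a
// nil *ffcli.Command that the subcommand list can filter out.
func ccall[T any](f func() T) T {
	var zero T
	if f == nil {
		return zero
	}
	return f()
}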
@@ -138,6 +138,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/udprelay/status from tailscale.com/client/local+
tailscale.com/paths from tailscale.com/client/local+
💣 tailscale.com/safesocket from tailscale.com/client/local+
tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+
@@ -350,6 +350,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+
tailscale.com/net/udprelay from tailscale.com/feature/relayserver
tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+
tailscale.com/net/udprelay/status from tailscale.com/client/local+
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/local+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
@@ -307,6 +307,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/tsd+
tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock
tailscale.com/net/udprelay/status from tailscale.com/client/local
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/local+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
@@ -6,14 +6,19 @@
package relayserver

import (
	"encoding/json"
	"fmt"
	"net/http"
	"sync"

	"tailscale.com/disco"
	"tailscale.com/feature"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnext"
	"tailscale.com/ipn/localapi"
	"tailscale.com/net/udprelay"
	"tailscale.com/net/udprelay/endpoint"
	"tailscale.com/net/udprelay/status"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
@@ -29,6 +34,37 @@ const featureName = "relayserver"

func init() {
	feature.Register(featureName)
	ipnext.RegisterExtension(featureName, newExtension)
	localapi.Register("debug-peer-relay-sessions", servePeerRelayDebugSessions)
}

// servePeerRelayDebugSessions is an HTTP handler for the LocalAPI that
// returns debug/status information for peer relay sessions being relayed by
// this Tailscale node. It writes a JSON-encoded [status.ServerStatus] into the
// HTTP response, or returns an HTTP 405/500 with error text as the body.
func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "GET required", http.StatusMethodNotAllowed)
		return
	}

	var e *extension
	if ok := h.LocalBackend().FindMatchingExtension(&e); !ok {
		http.Error(w, "peer relay server extension unavailable", http.StatusInternalServerError)
		return
	}

	st, err := e.status()
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to retrieve peer relay server status: %v", err), http.StatusInternalServerError)
		return
	}

	j, err := json.Marshal(st)
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to marshal json: %v", err), http.StatusInternalServerError)
		return
	}
	w.Write(j)
}

// newExtension is an [ipnext.NewExtensionFn] that creates a new relay server
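Because the handler is registered via localapi.Register, the endpoint is reachable by any LocalAPI client, not just the CLI. A hypothetical sketch in Go over tailscaled's unix socket, assuming the typical Linux socket path (the path varies by platform, and some platforms gate LocalAPI access behind additional authentication):

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

func main() {
	c := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/tailscale/tailscaled.sock")
		},
	}}
	// The host below is a placeholder; the unix socket carries the request.
	resp, err := c.Get("http://local-tailscaled.sock/localapi/v0/debug-peer-relay-sessions")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}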
@@ -59,6 +95,27 @@ type extension struct {
type relayServer interface {
	AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error)
	Close() error
	GetSessions() ([]status.ServerSession, error)
}

// PeerRelaySessionsReq is an empty event bus message type, used to send an
// async request for peer relay status information to the peer relay server's
// event loop. The server should respond with a [PeerRelaySessionsResp] via the
// event bus.
type PeerRelaySessionsReq struct{}

// PeerRelaySessionsResp is an event bus message type containing peer relay
// status information. Sent by the peer relay server in response to a
// [PeerRelaySessionsReq] message.
type PeerRelaySessionsResp struct {
	// Status is the current status/config of the peer relay server and all of
	// its peer relay sessions (if any). May be the zero value if Error is
	// populated.
	Status status.ServerStatus
	// Error contains any error generated by the peer relay server while trying
	// to gather status; it may or may not be populated regardless of whether
	// the Status field is valid.
	Error error
}

// Name implements [ipnext.Extension].
@@ -119,6 +176,8 @@ func (e *extension) consumeEventbusTopics(port int) {
	defer close(e.busDoneCh)

	eventClient := e.bus.Client("relayserver.extension")
	debugReqSub := eventbus.Subscribe[PeerRelaySessionsReq](eventClient)
	debugRespPub := eventbus.Publish[PeerRelaySessionsResp](eventClient)
	reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient)
	respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient)
	defer eventClient.Close()
@@ -137,6 +196,32 @@ func (e *extension) consumeEventbusTopics(port int) {
			// If reqSub is done, the eventClient has been closed, which is a
			// signal to return.
			return
		case <-debugReqSub.Events():
			st := status.ServerStatus{
				State:    status.Uninitialized,
				UDPPort:  port,
				Sessions: nil,
			}
			if rs == nil {
				// Don't initialize the server simply for a debug request;
				// return the status as-is.
				resp := PeerRelaySessionsResp{st, nil}
				debugRespPub.Publish(resp)
				continue
			}
			// We know the server is [status.Running] because rs != nil, which
			// can only be the case if the port is configured and peer relaying
			// isn't disabled by node attribute.
			st.State = status.Running
			sessions, err := rs.GetSessions()
			if err != nil {
				prsErr := fmt.Errorf("error retrieving peer relay sessions: %v", err)
				e.logf("%v", prsErr)
				debugRespPub.Publish(PeerRelaySessionsResp{Error: prsErr})
				continue
			}
			st.Sessions = sessions
			debugRespPub.Publish(PeerRelaySessionsResp{st, nil})
		case req := <-reqSub.Events():
			if rs == nil {
				var err error
@@ -188,3 +273,52 @@ func (e *extension) Shutdown() error {
	e.shutdown = true
	return nil
}

// status gathers and returns current peer relay server status information for
// this Tailscale node, including whether this node is disabled or not
// configured as a peer relay server, plus the status of each peer relay
// session this node is relaying (if any).
func (e *extension) status() (status.ServerStatus, error) {
	st := status.ServerStatus{
		State:    status.Uninitialized,
		UDPPort:  -1,
		Sessions: nil,
	}

	e.mu.Lock()
	running := e.busDoneCh != nil
	shutdown := e.shutdown
	port := e.port
	disabled := e.hasNodeAttrDisableRelayServer
	e.mu.Unlock()

	if port == nil {
		st.State = status.NotConfigured
		return st, nil
	}

	st.UDPPort = *port
	if disabled {
		st.State = status.Disabled
		return st, nil
	}

	if shutdown {
		st.State = status.ShutDown
		return st, nil
	}

	if !running {
		// Leave state as Uninitialized.
		return st, nil
	}

	client := e.bus.Client("relayserver.debug-peer-relay-sessions")
	defer client.Close()
	debugReqPub := eventbus.Publish[PeerRelaySessionsReq](client)
	debugRespSub := eventbus.Subscribe[PeerRelaySessionsResp](client)

	debugReqPub.Publish(PeerRelaySessionsReq{})
	resp := <-debugRespSub.Events()
	return resp.Status, resp.Error
}
@@ -27,6 +27,7 @@ import (
	"tailscale.com/net/packet"
	"tailscale.com/net/stun"
	"tailscale.com/net/udprelay/endpoint"
	"tailscale.com/net/udprelay/status"
	"tailscale.com/tstime"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
@@ -90,10 +91,15 @@ type serverEndpoint struct {
	boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg
	lastSeen       [2]time.Time      // TODO(jwhited): consider using mono.Time
	challenge      [2][disco.BindUDPRelayChallengeLen]byte
	packetsRx      [2]uint64 // num packets received from/sent by each client after active state reached
	bytesRx        [2]uint64 // num bytes received from/sent by each client after active state reached

	lamportID   uint64
	vni         uint32
	allocatedAt time.Time

	// status is the overall state of this server endpoint's peer relay session establishment/operation.
	status status.SessionStatus
}

func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, conn *net.UDPConn, serverDisco key.DiscoPublic) {
@@ -150,6 +156,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex
		box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil))
		reply = append(reply, box...)
		conn.WriteMsgUDPAddrPort(reply, nil, from)
		e.status = status.Binding
		return
	case *disco.BindUDPRelayEndpointAnswer:
		err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon)
@@ -167,6 +174,13 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex
		}
		// Handshake complete. Update the binding for this sender.
		e.boundAddrPorts[senderIndex] = from

		// If both clients have bound into the endpoint, we've moved from the
		// Binding phase to the Pinging phase of peer relay session
		// establishment.
		if e.isBound() {
			e.status = status.Pinging
		}
		e.lastSeen[senderIndex] = time.Now() // record last seen as bound time
		return
	default:
@@ -220,13 +234,24 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade
	case from == e.boundAddrPorts[0]:
		e.lastSeen[0] = time.Now()
		to = e.boundAddrPorts[1]
		e.packetsRx[0]++
		e.bytesRx[0] += uint64(len(b))
	case from == e.boundAddrPorts[1]:
		e.lastSeen[1] = time.Now()
		to = e.boundAddrPorts[0]
		e.packetsRx[1]++
		e.bytesRx[1] += uint64(len(b))
	default:
		// unrecognized source
		return
	}

	// If we reach here and packets are flowing bidirectionally, the
	// Pinging phase of session establishment is complete and the session
	// is active.
	if e.status == status.Pinging && e.packetsRx[0] > 1 && e.packetsRx[1] > 1 {
		e.status = status.Active
	}
	// Relay the packet towards the other party via the socket associated
	// with the destination's address family. If source and destination
	// address families are matching we tx on the same socket the packet
@@ -237,6 +262,7 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade
	} else if otherAFSocket != nil {
		otherAFSocket.WriteMsgUDPAddrPort(b, nil, to)
	}

	return
}
@@ -644,6 +670,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv
		discoPubKeys: pair,
		lamportID:    s.lamportID,
		allocatedAt:  time.Now(),
		status:       status.Allocating,
	}
	e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0])
	e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1])
@@ -663,3 +690,52 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv
		SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime},
	}, nil
}

// extractClientInfo constructs a [status.ClientInfo] for one of the two peer
// relay clients involved in this session.
func extractClientInfo(idx int, ep *serverEndpoint) status.ClientInfo {
	if idx != 0 && idx != 1 {
		panic(fmt.Sprintf("idx passed to extractClientInfo() must be 0 or 1; got %d", idx))
	}

	// If neither the bound nor handshake addrports are valid, just pass on the
	// invalid zero value; users need to call ClientInfo.Endpoint.IsValid()
	// before use.
	var ap netip.AddrPort
	if ep.boundAddrPorts[idx].IsValid() {
		ap = ep.boundAddrPorts[idx]
	} else if ep.handshakeAddrPorts[idx].IsValid() {
		ap = ep.handshakeAddrPorts[idx]
	}
	return status.ClientInfo{
		Endpoint:   ap,
		ShortDisco: ep.discoPubKeys.Get()[idx].ShortString(),
		PacketsTx:  ep.packetsRx[idx],
		BytesTx:    ep.bytesRx[idx],
	}
}

// GetSessions returns a slice of peer relay session statuses, with each
// entry containing detailed info about the server and clients involved in
// each session. This information is intended for debugging/status UX, and
// should not be relied on for any purpose outside of that.
func (s *Server) GetSessions() ([]status.ServerSession, error) {
	sessions := make([]status.ServerSession, 0)
	for _, se := range s.byDisco {
		c1 := extractClientInfo(0, se)
		c2 := extractClientInfo(1, se)
		si := status.ServerInfo{
			// TODO (dylan): Is this the correct addrPort to be using here?
			Endpoint:   s.addrPorts[0],
			ShortDisco: s.discoPublic.ShortString(),
		}
		sessions = append(sessions, status.ServerSession{
			Status:  se.status,
			VNI:     se.vni,
			Client1: c1,
			Client2: c2,
			Server:  si,
		})
	}
	return sessions, nil
}
net/udprelay/status/status.go (new file, 196 lines)
@@ -0,0 +1,196 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package status contains types relating to the status of peer relay sessions
// between peer relay client nodes via a peer relay server.
package status

import (
	"fmt"
	"net/netip"
)

// ServerState is the current state of the peer relay server extension.
type ServerState int

const (
	// Uninitialized indicates the peer relay server hasn't been initialized
	// yet on this node. It does NOT imply the peer relay server can be
	// initialized for this node; the node may not be configured as a peer
	// relay server yet, or may be disabled by node attribute.
	Uninitialized ServerState = iota
	// NotConfigured indicates the peer relay server port has not been set for
	// this node; a node cannot be a peer relay server until the port has been
	// set.
	NotConfigured
	// Disabled indicates the peer relay server has been disabled by a node
	// attribute pushed via C2N.
	Disabled
	// Running indicates the peer relay server has been initialized and can
	// relay sessions between peers on the configured UDP port.
	Running
	// ShutDown indicates the peer relay server extension has been told to
	// shut down, and can no longer relay sessions between peers.
	ShutDown
)

// ServerStatus contains the listening UDP port, state, and active sessions (if
// any) for this node's peer relay server at a point in time.
type ServerStatus struct {
	// State is the current phase/state in the peer relay server's state
	// machine. See [ServerState].
	State ServerState
	// UDPPort is the UDP port number that the peer relay server is listening
	// for incoming peer relay endpoint allocation requests on, as configured
	// by the user with 'tailscale set --relay-server-port=<PORT>'. If State is
	// [NotConfigured], this field will be -1.
	UDPPort int
	// Sessions is a slice of detailed status information about each peer
	// relay session that this node's peer relay server is involved with. It
	// may be empty.
	Sessions []ServerSession
}

// ServerInfo contains status-related information about the peer relay server
// involved in a single peer relay session.
type ServerInfo struct {
	// Endpoint is the [netip.AddrPort] for the peer relay server's underlay
	// endpoint participating in the session. Both clients in a session are
	// bound into the same endpoint on the server. This may be invalid; check
	// the value with [netip.AddrPort.IsValid] before using.
	Endpoint netip.AddrPort
	// ShortDisco is a string representation of the peer relay server's disco
	// public key. This can be the empty string.
	ShortDisco string
}

// String returns a string representation of the [ServerInfo] containing the
// endpoint address/port and short disco public key.
func (i *ServerInfo) String() string {
	disco := i.ShortDisco
	if disco == "" {
		disco = "d:unknown"
	}

	if i.Endpoint.IsValid() {
		return fmt.Sprintf("%v[%s]", i.Endpoint, disco)
	}
	return fmt.Sprintf("unknown[%s]", disco)
}

// ClientInfo contains status-related information about a single peer relay
// client involved in a single peer relay session.
type ClientInfo struct {
	// Endpoint is the [netip.AddrPort] of this peer relay client's underlay
	// endpoint participating in the session. This may be invalid; check the
	// value with [netip.AddrPort.IsValid] before using.
	Endpoint netip.AddrPort
	// ShortDisco is a string representation of this peer relay client's disco
	// public key. This can be the empty string.
	ShortDisco string
	// PacketsTx is the number of packets this peer relay client has sent to
	// the other client via the relay server after completing session
	// establishment. This is identical to the number of packets that the peer
	// relay server has received from this client.
	PacketsTx uint64
	// BytesTx is the total overlay bytes this peer relay client has sent to
	// the other client via the relay server after completing session
	// establishment. This is identical to the total overlay bytes that the
	// peer relay server has received from this client.
	BytesTx uint64
}

// String returns a string representation of the [ClientInfo] containing the
// endpoint address/port, short disco public key, and packet/byte counts.
func (i *ClientInfo) String() string {
	disco := i.ShortDisco
	if disco == "" {
		disco = "d:unknown"
	}

	if i.Endpoint.IsValid() {
		return fmt.Sprintf("%v[%s] tx %v(%vB)", i.Endpoint, disco, i.PacketsTx, i.BytesTx)
	}
	return fmt.Sprintf("unknown[%s] tx %v(%vB)", disco, i.PacketsTx, i.BytesTx)
}

// ServerSession contains status information for a single session between two
// peer relay clients, which is relayed via one peer relay server. This is the
// status as seen by the peer relay server; each client node may have a
// different view of the session's current status based on connectivity and
// where the client is in the peer relay endpoint setup (allocation, binding,
// pinging, active).
type ServerSession struct {
	// Status is the current state of the session, as seen by the peer relay
	// server. It contains the status of each phase of session setup and usage:
	// endpoint allocation, endpoint binding, disco ping/pong, and active.
	Status SessionStatus
	// VNI is the Virtual Network Identifier for this peer relay session, which
	// comes from the Geneve header and is unique to this session.
	VNI uint32
	// Server contains status information about the peer relay server involved
	// in this session.
	Server ServerInfo
	// Client1 contains status information about one of the two peer relay
	// clients involved in this session. Note that 'Client1' does NOT mean this
	// was/wasn't the allocating client, or the first client to bind, etc; this
	// is just one client of two.
	Client1 ClientInfo
	// Client2 contains status information about one of the two peer relay
	// clients involved in this session. Note that 'Client2' does NOT mean this
	// was/wasn't the allocating client, or the second client to bind, etc;
	// this is just one client of two.
	Client2 ClientInfo
}

// SessionStatus is the current state of a peer relay session, as seen by the
// peer relay server that's relaying the session.
type SessionStatus int

const (
	// NotStarted is the default "unknown" state for a session; it should not
	// be seen outside of initialization.
	NotStarted SessionStatus = iota
	// Allocating indicates a peer relay client has contacted the peer relay
	// server with a valid endpoint allocation request, and the server is in
	// the process of allocating it. A session remains in this state until one
	// of the two clients begins the Binding process.
	Allocating
	// Binding indicates at least one of the two peer relay clients has started
	// the endpoint binding handshake with the peer relay server's endpoint for
	// this session. A session remains in this state until both clients have
	// completed the binding handshake and are bound into the endpoint.
	Binding
	// Pinging indicates the two peer relay clients should be sending disco
	// ping/pong messages to one another to confirm peer relay session
	// connectivity via the peer relay server endpoint. We don't actually
	// monitor the disco ping/pong messages between the clients; we move into
	// this state when Binding is complete, and move out of this state to
	// [Active] when we see packets being exchanged bidirectionally over the
	// session endpoint. As such, Pinging is currently an implicit intermediate
	// state rather than a "confirmed by looking at disco ping/pong" state.
	Pinging
	// Active indicates the peer relay clients are both bound into the peer
	// relay session, have completed their disco pinging process, and are
	// bidirectionally exchanging packets via the peer relay server.
	Active
)

// String returns a short, human-readable string representation of the current
// [SessionStatus].
func (s SessionStatus) String() string {
	switch s {
	case Allocating:
		return "allocating endpoint"
	case Binding:
		return "binding endpoint"
	case Pinging:
		return "clients pinging"
	case Active:
		return "session active"
	default:
		return "unknown"
	}
}
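To illustrate how these String methods render in the CLI table, here is a small, self-contained sketch; the disco string and endpoint values are fabricated:

package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/net/udprelay/status"
)

func main() {
	// A client that hasn't completed a handshake carries an invalid
	// (zero value) endpoint, so String() falls back to "unknown[...]".
	c := status.ClientInfo{ShortDisco: "d:0123456789abcdef"} // fabricated disco string
	fmt.Println(c.String()) // unknown[d:0123456789abcdef] tx 0(0B)

	// Once bound, the client's addr:port and traffic counters render inline.
	c.Endpoint = netip.MustParseAddrPort("192.0.2.10:41641")
	c.PacketsTx, c.BytesTx = 42, 6300
	fmt.Println(c.String()) // 192.0.2.10:41641[d:0123456789abcdef] tx 42(6300B)
}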
@@ -303,6 +303,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/tsd+
tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock
tailscale.com/net/udprelay/status from tailscale.com/client/local
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/local+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal