mirror of
https://github.com/tailscale/tailscale.git
synced 2025-08-12 13:48:01 +00:00
cmd/k8s-operator,k8s-operator/sessionrecording,sessionrecording,ssh/tailssh: refactor session recording functionality (#12945)
cmd/k8s-operator,k8s-operator/sessionrecording,sessionrecording,ssh/tailssh: refactor session recording functionality Refactor SSH session recording functionality (mostly the bits related to Kubernetes API server proxy 'kubectl exec' session recording): - move the session recording bits used by both Tailscale SSH and the Kubernetes API server proxy into a shared sessionrecording package, to avoid having the operator to import ssh/tailssh - move the Kubernetes API server proxy session recording functionality into a k8s-operator/sessionrecording package, add some abstractions in preparation for adding support for a second streaming protocol (WebSockets) Updates tailscale/corp#19821 Signed-off-by: Irbe Krumina <irbe@tailscale.com>
This commit is contained in:
@@ -5,7 +5,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+
|
||||
W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate
|
||||
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
|
||||
LD github.com/anmitsu/go-shlex from tailscale.com/tempfork/gliderlabs/ssh
|
||||
L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+
|
||||
L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore
|
||||
L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+
|
||||
@@ -82,7 +81,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
github.com/bits-and-blooms/bitset from github.com/gaissmai/bart
|
||||
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
|
||||
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
|
||||
LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh
|
||||
💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump
|
||||
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+
|
||||
W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+
|
||||
@@ -113,7 +111,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+
|
||||
github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference
|
||||
github.com/go-openapi/swag from github.com/go-openapi/jsonpointer+
|
||||
L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns+
|
||||
L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns
|
||||
💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+
|
||||
github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+
|
||||
github.com/golang/groupcache/lru from k8s.io/client-go/tools/record+
|
||||
@@ -161,7 +159,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe
|
||||
github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd
|
||||
github.com/kortschak/wol from tailscale.com/ipn/ipnlocal
|
||||
LD github.com/kr/fs from github.com/pkg/sftp
|
||||
github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter
|
||||
💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag
|
||||
github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag
|
||||
@@ -183,8 +180,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4
|
||||
L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream
|
||||
github.com/pkg/errors from github.com/evanphx/json-patch/v5+
|
||||
LD github.com/pkg/sftp from tailscale.com/ssh/tailssh
|
||||
LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp
|
||||
D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack
|
||||
💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+
|
||||
github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
|
||||
@@ -207,7 +202,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+
|
||||
github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal
|
||||
LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh
|
||||
LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+
|
||||
LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal
|
||||
LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh
|
||||
github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+
|
||||
github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper
|
||||
@@ -230,7 +225,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device
|
||||
💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+
|
||||
github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck
|
||||
LD github.com/u-root/u-root/pkg/termios from tailscale.com/ssh/tailssh
|
||||
L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4
|
||||
L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+
|
||||
L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink
|
||||
@@ -660,7 +654,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
tailscale.com/client/web from tailscale.com/ipn/ipnlocal
|
||||
tailscale.com/clientupdate from tailscale.com/client/web+
|
||||
tailscale.com/clientupdate/distsign from tailscale.com/clientupdate
|
||||
LD tailscale.com/cmd/tailscaled/childproc from tailscale.com/ssh/tailssh
|
||||
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+
|
||||
tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+
|
||||
tailscale.com/control/controlhttp from tailscale.com/control/controlclient
|
||||
@@ -692,6 +685,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator
|
||||
tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1
|
||||
tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+
|
||||
tailscale.com/k8s-operator/sessionrecording from tailscale.com/cmd/k8s-operator
|
||||
tailscale.com/k8s-operator/sessionrecording/conn from tailscale.com/k8s-operator/sessionrecording/spdy
|
||||
tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording
|
||||
tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+
|
||||
tailscale.com/kube from tailscale.com/cmd/k8s-operator+
|
||||
tailscale.com/licenses from tailscale.com/client/web
|
||||
tailscale.com/log/filelogger from tailscale.com/logpolicy
|
||||
@@ -744,16 +741,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
tailscale.com/posture from tailscale.com/ipn/ipnlocal
|
||||
tailscale.com/proxymap from tailscale.com/tsd+
|
||||
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
|
||||
💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/k8s-operator
|
||||
tailscale.com/sessionrecording from tailscale.com/cmd/k8s-operator+
|
||||
tailscale.com/syncs from tailscale.com/control/controlknobs+
|
||||
tailscale.com/tailcfg from tailscale.com/client/tailscale+
|
||||
tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+
|
||||
LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh
|
||||
tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock
|
||||
tailscale.com/tka from tailscale.com/client/tailscale+
|
||||
W tailscale.com/tsconst from tailscale.com/net/netmon+
|
||||
tailscale.com/tsd from tailscale.com/ipn/ipnlocal+
|
||||
tailscale.com/tsnet from tailscale.com/cmd/k8s-operator
|
||||
tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+
|
||||
tailscale.com/tstime from tailscale.com/cmd/k8s-operator+
|
||||
tailscale.com/tstime/mono from tailscale.com/net/tstun+
|
||||
tailscale.com/tstime/rate from tailscale.com/derp+
|
||||
@@ -838,7 +834,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
golang.org/x/crypto/argon2 from tailscale.com/tka
|
||||
golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+
|
||||
golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+
|
||||
LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+
|
||||
LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf
|
||||
golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+
|
||||
golang.org/x/crypto/chacha20poly1305 from crypto/tls+
|
||||
golang.org/x/crypto/cryptobyte from crypto/ecdsa+
|
||||
@@ -849,7 +845,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
|
||||
golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device+
|
||||
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
|
||||
LD golang.org/x/crypto/ssh from github.com/pkg/sftp+
|
||||
golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+
|
||||
golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+
|
||||
golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+
|
||||
@@ -954,7 +949,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
|
||||
log/internal from log+
|
||||
log/slog from github.com/go-logr/logr+
|
||||
log/slog/internal from log/slog
|
||||
LD log/syslog from tailscale.com/ssh/tailssh
|
||||
maps from sigs.k8s.io/controller-runtime/pkg/predicate+
|
||||
math from archive/tar+
|
||||
math/big from crypto/dsa+
|
||||
|
@@ -22,8 +22,9 @@ import (
|
||||
"k8s.io/client-go/transport"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
kubesessionrecording "tailscale.com/k8s-operator/sessionrecording"
|
||||
tskube "tailscale.com/kube"
|
||||
"tailscale.com/ssh/tailssh"
|
||||
"tailscale.com/sessionrecording"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/util/clientmetric"
|
||||
@@ -36,12 +37,6 @@ var whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil))
|
||||
var (
|
||||
// counterNumRequestsproxies counts the number of API server requests proxied via this proxy.
|
||||
counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied")
|
||||
|
||||
// counterSessionRecordingsAttempted counts the number of session recording attempts.
|
||||
counterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy__session_recordings_attempted")
|
||||
|
||||
// counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings.
|
||||
counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded")
|
||||
)
|
||||
|
||||
type apiServerProxyMode int
|
||||
@@ -232,7 +227,7 @@ func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) {
|
||||
ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who)))
|
||||
return
|
||||
}
|
||||
counterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded
|
||||
kubesessionrecording.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded
|
||||
if !failOpen && len(addrs) == 0 {
|
||||
msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available."
|
||||
ap.log.Error(msg)
|
||||
@@ -252,18 +247,7 @@ func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, msg, http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
spdyH := &spdyHijacker{
|
||||
ts: ap.ts,
|
||||
req: r,
|
||||
who: who,
|
||||
ResponseWriter: w,
|
||||
log: ap.log,
|
||||
pod: r.PathValue("pod"),
|
||||
ns: r.PathValue("namespace"),
|
||||
addrs: addrs,
|
||||
failOpen: failOpen,
|
||||
connectToRecorder: tailssh.ConnectToRecorder,
|
||||
}
|
||||
spdyH := kubesessionrecording.New(ap.ts, r, who, w, r.PathValue("pod"), r.PathValue("namespace"), kubesessionrecording.SPDYProtocol, addrs, failOpen, sessionrecording.ConnectToRecorder, ap.log)
|
||||
|
||||
ap.rp.ServeHTTP(spdyH, r.WithContext(whoIsKey.WithValue(r.Context(), who)))
|
||||
}
|
||||
|
@@ -1,88 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
// recorder knows how to send the provided bytes to the configured tsrecorder
|
||||
// instance in asciinema format.
|
||||
type recorder struct {
|
||||
start time.Time
|
||||
clock tstime.Clock
|
||||
|
||||
// failOpen specifies whether the session should be allowed to
|
||||
// continue if writing to the recording fails.
|
||||
failOpen bool
|
||||
|
||||
// backOff is set to true if we've failed open and should stop
|
||||
// attempting to write to tsrecorder.
|
||||
backOff bool
|
||||
|
||||
mu sync.Mutex // guards writes to conn
|
||||
conn io.WriteCloser // connection to a tsrecorder instance
|
||||
}
|
||||
|
||||
// Write appends timestamp to the provided bytes and sends them to the
|
||||
// configured tsrecorder.
|
||||
func (rec *recorder) Write(p []byte) (err error) {
|
||||
if len(p) == 0 {
|
||||
return nil
|
||||
}
|
||||
if rec.backOff {
|
||||
return nil
|
||||
}
|
||||
j, err := json.Marshal([]any{
|
||||
rec.clock.Now().Sub(rec.start).Seconds(),
|
||||
"o",
|
||||
string(p),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marhalling payload: %w", err)
|
||||
}
|
||||
j = append(j, '\n')
|
||||
if err := rec.writeCastLine(j); err != nil {
|
||||
if !rec.failOpen {
|
||||
return fmt.Errorf("error writing payload to recorder: %w", err)
|
||||
}
|
||||
rec.backOff = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rec *recorder) Close() error {
|
||||
rec.mu.Lock()
|
||||
defer rec.mu.Unlock()
|
||||
if rec.conn == nil {
|
||||
return nil
|
||||
}
|
||||
err := rec.conn.Close()
|
||||
rec.conn = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// writeCastLine sends bytes to the tsrecorder. The bytes should be in
|
||||
// asciinema format.
|
||||
func (rec *recorder) writeCastLine(j []byte) error {
|
||||
rec.mu.Lock()
|
||||
defer rec.mu.Unlock()
|
||||
if rec.conn == nil {
|
||||
return errors.New("recorder closed")
|
||||
}
|
||||
_, err := rec.conn.Write(j)
|
||||
if err != nil {
|
||||
return fmt.Errorf("recorder write error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -1,285 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
SYN_STREAM ControlFrameType = 1 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.1
|
||||
SYN_REPLY ControlFrameType = 2 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.2
|
||||
SYN_PING ControlFrameType = 6 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.5
|
||||
)
|
||||
|
||||
// spdyFrame is a parsed SPDY frame as defined in
|
||||
// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt
|
||||
// A SPDY frame can be either a control frame or a data frame.
|
||||
type spdyFrame struct {
|
||||
Raw []byte // full frame as raw bytes
|
||||
|
||||
// Common frame fields:
|
||||
Ctrl bool // true if this is a SPDY control frame
|
||||
Payload []byte // payload as raw bytes
|
||||
|
||||
// Control frame fields:
|
||||
Version uint16 // SPDY protocol version
|
||||
Type ControlFrameType
|
||||
|
||||
// Data frame fields:
|
||||
// StreamID is the id of the steam to which this data frame belongs.
|
||||
// SPDY allows transmitting multiple data streams concurrently.
|
||||
StreamID uint32
|
||||
}
|
||||
|
||||
// Type of an SPDY control frame.
|
||||
type ControlFrameType uint16
|
||||
|
||||
// Parse parses bytes into spdyFrame.
|
||||
// If the bytes don't contain a full frame, return false.
|
||||
//
|
||||
// Control frame structure:
|
||||
//
|
||||
// +----------------------------------+
|
||||
// |C| Version(15bits) | Type(16bits) |
|
||||
// +----------------------------------+
|
||||
// | Flags (8) | Length (24 bits) |
|
||||
// +----------------------------------+
|
||||
// | Data |
|
||||
// +----------------------------------+
|
||||
//
|
||||
// Data frame structure:
|
||||
//
|
||||
// +----------------------------------+
|
||||
// |C| Stream-ID (31bits) |
|
||||
// +----------------------------------+
|
||||
// | Flags (8) | Length (24 bits) |
|
||||
// +----------------------------------+
|
||||
// | Data |
|
||||
// +----------------------------------+
|
||||
//
|
||||
// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt
|
||||
func (sf *spdyFrame) Parse(b []byte, log *zap.SugaredLogger) (ok bool, _ error) {
|
||||
const (
|
||||
spdyHeaderLength = 8
|
||||
)
|
||||
have := len(b)
|
||||
if have < spdyHeaderLength { // input does not contain full frame
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !isSPDYFrameHeader(b) {
|
||||
return false, fmt.Errorf("bytes %v do not seem to contain SPDY frames. Ensure that you are using a SPDY based client to 'kubectl exec'.", b)
|
||||
}
|
||||
|
||||
payloadLength := readInt24(b[5:8])
|
||||
frameLength := payloadLength + spdyHeaderLength
|
||||
if have < frameLength { // input does not contain full frame
|
||||
return false, nil
|
||||
}
|
||||
|
||||
frame := b[:frameLength:frameLength] // enforce frameLength capacity
|
||||
|
||||
sf.Raw = frame
|
||||
sf.Payload = frame[spdyHeaderLength:frameLength]
|
||||
|
||||
sf.Ctrl = hasControlBitSet(frame)
|
||||
|
||||
if !sf.Ctrl { // data frame
|
||||
sf.StreamID = dataFrameStreamID(frame)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
sf.Version = controlFrameVersion(frame)
|
||||
sf.Type = controlFrameType(frame)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// parseHeaders retrieves any headers from this spdyFrame.
|
||||
func (sf *spdyFrame) parseHeaders(z *zlibReader, log *zap.SugaredLogger) (http.Header, error) {
|
||||
if !sf.Ctrl {
|
||||
return nil, fmt.Errorf("[unexpected] parseHeaders called for a frame that is not a control frame")
|
||||
}
|
||||
const (
|
||||
// +------------------------------------+
|
||||
// |X| Stream-ID (31bits) |
|
||||
// +------------------------------------+
|
||||
// |X| Associated-To-Stream-ID (31bits) |
|
||||
// +------------------------------------+
|
||||
// | Pri|Unused | Slot | |
|
||||
// +-------------------+ |
|
||||
synStreamPayloadLengthBeforeHeaders = 10
|
||||
|
||||
// +------------------------------------+
|
||||
// |X| Stream-ID (31bits) |
|
||||
//+------------------------------------+
|
||||
synReplyPayloadLengthBeforeHeaders = 4
|
||||
|
||||
// +----------------------------------|
|
||||
// | 32-bit ID |
|
||||
// +----------------------------------+
|
||||
pingPayloadLength = 4
|
||||
)
|
||||
|
||||
switch sf.Type {
|
||||
case SYN_STREAM:
|
||||
if len(sf.Payload) < synStreamPayloadLengthBeforeHeaders {
|
||||
return nil, fmt.Errorf("SYN_STREAM frame too short: %v", len(sf.Payload))
|
||||
}
|
||||
z.Set(sf.Payload[synStreamPayloadLengthBeforeHeaders:])
|
||||
return parseHeaders(z, log)
|
||||
case SYN_REPLY:
|
||||
if len(sf.Payload) < synReplyPayloadLengthBeforeHeaders {
|
||||
return nil, fmt.Errorf("SYN_REPLY frame too short: %v", len(sf.Payload))
|
||||
}
|
||||
if len(sf.Payload) == synReplyPayloadLengthBeforeHeaders {
|
||||
return nil, nil // no headers
|
||||
}
|
||||
z.Set(sf.Payload[synReplyPayloadLengthBeforeHeaders:])
|
||||
return parseHeaders(z, log)
|
||||
case SYN_PING:
|
||||
if len(sf.Payload) != pingPayloadLength {
|
||||
return nil, fmt.Errorf("PING frame with unexpected length %v", len(sf.Payload))
|
||||
}
|
||||
return nil, nil // ping frame has no headers
|
||||
|
||||
default:
|
||||
log.Infof("[unexpected] unknown control frame type %v", sf.Type)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// parseHeaders expects to be passed a reader that contains a compressed SPDY control
|
||||
// frame Name/Value Header Block with 0 or more headers:
|
||||
//
|
||||
// | Number of Name/Value pairs (int32) | <+
|
||||
// +------------------------------------+ |
|
||||
// | Length of name (int32) | | This section is the "Name/Value
|
||||
// +------------------------------------+ | Header Block", and is compressed.
|
||||
// | Name (string) | |
|
||||
// +------------------------------------+ |
|
||||
// | Length of value (int32) | |
|
||||
// +------------------------------------+ |
|
||||
// | Value (string) | |
|
||||
// +------------------------------------+ |
|
||||
// | (repeats) | <+
|
||||
//
|
||||
// It extracts the headers and returns them as http.Header. By doing that it
|
||||
// also advances the provided reader past the headers block.
|
||||
// See also https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10
|
||||
func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
defer bufPool.Put(buf)
|
||||
buf.Reset()
|
||||
|
||||
// readUint32 reads the next 4 decompressed bytes from the decompressor
|
||||
// as a uint32.
|
||||
readUint32 := func() (uint32, error) {
|
||||
const uint32Length = 4
|
||||
if _, err := io.CopyN(buf, decompressor, uint32Length); err != nil { // decompress
|
||||
return 0, fmt.Errorf("error decompressing bytes: %w", err)
|
||||
}
|
||||
return binary.BigEndian.Uint32(buf.Next(uint32Length)), nil // return as uint32
|
||||
}
|
||||
|
||||
// readLenBytes decompresses and returns as bytes the next 'Name' or 'Value'
|
||||
// field from SPDY Name/Value header block. decompressor must be at
|
||||
// 'Length of name'/'Length of value' field.
|
||||
readLenBytes := func() ([]byte, error) {
|
||||
xLen, err := readUint32() // length of field to read
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := io.CopyN(buf, decompressor, int64(xLen)); err != nil { // decompress
|
||||
return nil, err
|
||||
}
|
||||
return buf.Next(int(xLen)), nil
|
||||
}
|
||||
|
||||
numHeaders, err := readUint32()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error determining num headers: %v", err)
|
||||
}
|
||||
h := make(http.Header, numHeaders)
|
||||
for i := uint32(0); i < numHeaders; i++ {
|
||||
name, err := readLenBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ns := string(name)
|
||||
if _, ok := h[ns]; ok {
|
||||
return nil, fmt.Errorf("invalid data: duplicate header %q", ns)
|
||||
}
|
||||
val, err := readLenBytes()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading header data: %w", err)
|
||||
}
|
||||
for _, v := range bytes.Split(val, headerSep) {
|
||||
h.Add(ns, string(v))
|
||||
}
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// isSPDYFrame validates that the input bytes start with a valid SPDY frame
|
||||
// header.
|
||||
func isSPDYFrameHeader(f []byte) bool {
|
||||
if hasControlBitSet(f) {
|
||||
// If this is a control frame, version and type must be set.
|
||||
return controlFrameVersion(f) != uint16(0) && uint16(controlFrameType(f)) != uint16(0)
|
||||
}
|
||||
// If this is a data frame, stream ID must be set.
|
||||
return dataFrameStreamID(f) != uint32(0)
|
||||
}
|
||||
|
||||
// spdyDataFrameStreamID returns stream ID for an SPDY data frame passed as the
|
||||
// input data slice. StreaID is contained within bits [0-31) of a data frame
|
||||
// header.
|
||||
func dataFrameStreamID(frame []byte) uint32 {
|
||||
return binary.BigEndian.Uint32(frame[0:4]) & 0x7f
|
||||
}
|
||||
|
||||
// controlFrameType returns the type of a SPDY control frame.
|
||||
// See https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6
|
||||
func controlFrameType(f []byte) ControlFrameType {
|
||||
return ControlFrameType(binary.BigEndian.Uint16(f[2:4]))
|
||||
}
|
||||
|
||||
// spdyControlFrameVersion returns SPDY version extracted from input bytes that
|
||||
// must be a SPDY control frame.
|
||||
func controlFrameVersion(frame []byte) uint16 {
|
||||
bs := binary.BigEndian.Uint16(frame[0:2]) // first 16 bits
|
||||
return bs & 0x7f // discard control bit
|
||||
}
|
||||
|
||||
// hasControlBitSet returns true if the passsed bytes have SPDY control bit set.
|
||||
// SPDY frames can be either control frames or data frames. A control frame has
|
||||
// control bit set to 1 and a data frame has it set to 0.
|
||||
func hasControlBitSet(frame []byte) bool {
|
||||
return frame[0]&0x80 == 128 // 0x80
|
||||
}
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() any {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// Headers in SPDY header name/value block are separated by a 0 byte.
|
||||
// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10
|
||||
var headerSep = []byte{0}
|
||||
|
||||
func readInt24(b []byte) int {
|
||||
_ = b[2] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return int(b[0])<<16 | int(b[1])<<8 | int(b[2])
|
||||
}
|
@@ -1,293 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func Test_spdyFrame_Parse(t *testing.T) {
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
gotBytes []byte
|
||||
wantFrame spdyFrame
|
||||
wantOk bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "control_frame_syn_stream",
|
||||
gotBytes: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0},
|
||||
wantFrame: spdyFrame{
|
||||
Version: 3,
|
||||
Type: SYN_STREAM,
|
||||
Ctrl: true,
|
||||
Raw: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0},
|
||||
Payload: []byte{},
|
||||
},
|
||||
wantOk: true,
|
||||
},
|
||||
{
|
||||
name: "control_frame_syn_reply",
|
||||
gotBytes: []byte{0x80, 0x3, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0},
|
||||
wantFrame: spdyFrame{
|
||||
Ctrl: true,
|
||||
Version: 3,
|
||||
Type: SYN_REPLY,
|
||||
Raw: []byte{0x80, 0x3, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0},
|
||||
Payload: []byte{},
|
||||
},
|
||||
wantOk: true,
|
||||
},
|
||||
{
|
||||
name: "control_frame_headers",
|
||||
gotBytes: []byte{0x80, 0x3, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0},
|
||||
wantFrame: spdyFrame{
|
||||
Ctrl: true,
|
||||
Version: 3,
|
||||
Type: 8,
|
||||
Raw: []byte{0x80, 0x3, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0},
|
||||
Payload: []byte{},
|
||||
},
|
||||
wantOk: true,
|
||||
},
|
||||
{
|
||||
name: "data_frame_stream_id_5",
|
||||
gotBytes: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0},
|
||||
wantFrame: spdyFrame{
|
||||
Payload: []byte{},
|
||||
StreamID: 5,
|
||||
Raw: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0},
|
||||
},
|
||||
wantOk: true,
|
||||
},
|
||||
{
|
||||
name: "frame_with_incomplete_header",
|
||||
gotBytes: []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
|
||||
},
|
||||
{
|
||||
name: "frame_with_incomplete_payload",
|
||||
gotBytes: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x2}, // header specifies payload length of 2
|
||||
},
|
||||
{
|
||||
name: "control_bit_set_not_spdy_frame",
|
||||
gotBytes: []byte{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, // header specifies payload length of 2
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "control_bit_not_set_not_spdy_frame",
|
||||
gotBytes: []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, // header specifies payload length of 2
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sf := &spdyFrame{}
|
||||
gotOk, err := sf.Parse(tt.gotBytes, zl.Sugar())
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("spdyFrame.Parse() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotOk != tt.wantOk {
|
||||
t.Errorf("spdyFrame.Parse() = %v, want %v", gotOk, tt.wantOk)
|
||||
}
|
||||
if diff := cmp.Diff(*sf, tt.wantFrame); diff != "" {
|
||||
t.Errorf("Unexpected SPDY frame (-got +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_spdyFrame_parseHeaders(t *testing.T) {
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
isCtrl bool
|
||||
payload []byte
|
||||
typ ControlFrameType
|
||||
wantHeader http.Header
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "syn_stream_with_header",
|
||||
payload: payload(t, map[string]string{"Streamtype": "stdin"}, SYN_STREAM, 1),
|
||||
typ: SYN_STREAM,
|
||||
isCtrl: true,
|
||||
wantHeader: header(map[string]string{"Streamtype": "stdin"}),
|
||||
},
|
||||
{
|
||||
name: "syn_ping",
|
||||
payload: payload(t, nil, SYN_PING, 0),
|
||||
typ: SYN_PING,
|
||||
isCtrl: true,
|
||||
},
|
||||
{
|
||||
name: "syn_reply_headers",
|
||||
payload: payload(t, map[string]string{"foo": "bar", "bar": "baz"}, SYN_REPLY, 0),
|
||||
typ: SYN_REPLY,
|
||||
isCtrl: true,
|
||||
wantHeader: header(map[string]string{"foo": "bar", "bar": "baz"}),
|
||||
},
|
||||
{
|
||||
name: "syn_reply_no_headers",
|
||||
payload: payload(t, nil, SYN_REPLY, 0),
|
||||
typ: SYN_REPLY,
|
||||
isCtrl: true,
|
||||
},
|
||||
{
|
||||
name: "syn_stream_too_short_payload",
|
||||
payload: []byte{0, 1, 2, 3, 4},
|
||||
typ: SYN_STREAM,
|
||||
isCtrl: true,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "syn_reply_too_short_payload",
|
||||
payload: []byte{0, 1, 2},
|
||||
typ: SYN_REPLY,
|
||||
isCtrl: true,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "syn_ping_too_short_payload",
|
||||
payload: []byte{0, 1, 2},
|
||||
typ: SYN_PING,
|
||||
isCtrl: true,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "not_a_control_frame",
|
||||
payload: []byte{0, 1, 2, 3},
|
||||
typ: SYN_PING,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var reader zlibReader
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sf := &spdyFrame{
|
||||
Ctrl: tt.isCtrl,
|
||||
Type: tt.typ,
|
||||
Payload: tt.payload,
|
||||
}
|
||||
gotHeader, err := sf.parseHeaders(&reader, zl.Sugar())
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("spdyFrame.parseHeaders() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
if !reflect.DeepEqual(gotHeader, tt.wantHeader) {
|
||||
t.Errorf("spdyFrame.parseHeaders() = %v, want %v", gotHeader, tt.wantHeader)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// payload takes a control frame type and a map with 0 or more header keys and
|
||||
// values and returns a SPDY control frame payload with the header as SPDY zlib
|
||||
// compressed header name/value block. The payload is padded with arbitrary
|
||||
// bytes to ensure the header name/value block is in the correct position for
|
||||
// the frame type.
|
||||
func payload(t *testing.T, headerM map[string]string, typ ControlFrameType, streamID int) []byte {
|
||||
t.Helper()
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
writeControlFramePayloadBeforeHeaders(t, buf, typ, streamID)
|
||||
if len(headerM) == 0 {
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
w, err := zlib.NewWriterLevelDict(buf, zlib.BestCompression, spdyTxtDictionary)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating new zlib writer: %v", err)
|
||||
}
|
||||
if len(headerM) != 0 {
|
||||
writeHeaderValueBlock(t, w, headerM)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("error writing headers: %v", err)
|
||||
}
|
||||
w.Flush()
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// writeControlFramePayloadBeforeHeaders writes to w N bytes, N being the number
|
||||
// of bytes that control frame payload for that control frame is required to
|
||||
// contain before the name/value header block.
|
||||
func writeControlFramePayloadBeforeHeaders(t *testing.T, w io.Writer, typ ControlFrameType, streamID int) {
|
||||
t.Helper()
|
||||
switch typ {
|
||||
case SYN_STREAM:
|
||||
// needs 10 bytes in payload before any headers
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(streamID)); err != nil {
|
||||
t.Fatalf("writing streamID: %v", err)
|
||||
}
|
||||
if err := binary.Write(w, binary.BigEndian, [6]byte{0}); err != nil {
|
||||
t.Fatalf("writing payload: %v", err)
|
||||
}
|
||||
case SYN_REPLY:
|
||||
// needs 4 bytes in payload before any headers
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(0)); err != nil {
|
||||
t.Fatalf("writing payload: %v", err)
|
||||
}
|
||||
case SYN_PING:
|
||||
// needs 4 bytes in payload
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(0)); err != nil {
|
||||
t.Fatalf("writing payload: %v", err)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unexpected frame type: %v", typ)
|
||||
}
|
||||
}
|
||||
|
||||
// writeHeaderValue block takes http.Header and zlib writer, writes the headers
|
||||
// as SPDY zlib compressed bytes to the writer.
|
||||
// Adopted from https://github.com/moby/spdystream/blob/v0.2.0/spdy/write.go#L171-L198 (which is also what Kubernetes uses).
|
||||
func writeHeaderValueBlock(t *testing.T, w io.Writer, headerM map[string]string) {
|
||||
t.Helper()
|
||||
h := header(headerM)
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
|
||||
t.Fatalf("error writing header block length: %v", err)
|
||||
}
|
||||
for name, values := range h {
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
|
||||
t.Fatalf("error writing name length for name %q: %v", name, err)
|
||||
}
|
||||
name = strings.ToLower(name)
|
||||
if _, err := io.WriteString(w, name); err != nil {
|
||||
t.Fatalf("error writing name %q: %v", name, err)
|
||||
}
|
||||
v := strings.Join(values, string(headerSep))
|
||||
if err := binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
|
||||
t.Fatalf("error writing value length for value %q: %v", v, err)
|
||||
}
|
||||
if _, err := io.WriteString(w, v); err != nil {
|
||||
t.Fatalf("error writing value %q: %v", v, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func header(hs map[string]string) http.Header {
|
||||
h := make(http.Header, len(hs))
|
||||
for key, val := range hs {
|
||||
h.Add(key, val)
|
||||
}
|
||||
return h
|
||||
}
|
@@ -1,213 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/util/multierr"
|
||||
)
|
||||
|
||||
// spdyHijacker implements [net/http.Hijacker] interface.
|
||||
// It must be configured with an http request for a 'kubectl exec' session that
|
||||
// needs to be recorded. It knows how to hijack the connection and configure for
|
||||
// the session contents to be sent to a tsrecorder instance.
|
||||
type spdyHijacker struct {
|
||||
http.ResponseWriter
|
||||
ts *tsnet.Server
|
||||
req *http.Request
|
||||
who *apitype.WhoIsResponse
|
||||
log *zap.SugaredLogger
|
||||
pod string // pod being exec-d
|
||||
ns string // namespace of the pod being exec-d
|
||||
addrs []netip.AddrPort // tsrecorder addresses
|
||||
failOpen bool // whether to fail open if recording fails
|
||||
connectToRecorder RecorderDialFn
|
||||
}
|
||||
|
||||
// RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder
|
||||
// addresses. It tries to connect to recorder endpoints one by one, till one
|
||||
// connection succeeds. In case of success, returns a list with a single
|
||||
// successful recording attempt and an error channel. If the connection errors
|
||||
// after having been established, an error is sent down the channel.
|
||||
type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error)
|
||||
|
||||
// Hijack hijacks a 'kubectl exec' session and configures for the session
|
||||
// contents to be sent to a recorder.
|
||||
func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen)
|
||||
reqConn, brw, err := h.ResponseWriter.(http.Hijacker).Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error hijacking connection: %w", err)
|
||||
}
|
||||
|
||||
conn, err := h.setUpRecording(context.Background(), reqConn)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error setting up session recording: %w", err)
|
||||
}
|
||||
return conn, brw, nil
|
||||
}
|
||||
|
||||
// setupRecording attempts to connect to the recorders set via
|
||||
// spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording
|
||||
// logic. If connecting to the recorder fails or an error is received during the
|
||||
// session and spdyHijacker.failOpen is false, connection will be closed.
|
||||
func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
const (
|
||||
// https://docs.asciinema.org/manual/asciicast/v2/
|
||||
asciicastv2 = 2
|
||||
)
|
||||
var wc io.WriteCloser
|
||||
h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen)
|
||||
// TODO (irbekrm): send client a message that session will be recorded.
|
||||
rw, _, errChan, err := h.connectToRecorder(ctx, h.addrs, h.ts.Dial)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("error connecting to session recorders: %v", err)
|
||||
if h.failOpen {
|
||||
msg = msg + "; failure mode is 'fail open'; continuing session without recording."
|
||||
h.log.Warnf(msg)
|
||||
return conn, nil
|
||||
}
|
||||
msg = msg + "; failure mode is 'fail closed'; closing connection."
|
||||
if err := closeConnWithWarning(conn, msg); err != nil {
|
||||
return nil, multierr.New(errors.New(msg), err)
|
||||
}
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
// TODO (irbekrm): log which recorder
|
||||
h.log.Info("successfully connected to a session recorder")
|
||||
wc = rw
|
||||
cl := tstime.DefaultClock{}
|
||||
lc := &spdyRemoteConnRecorder{
|
||||
log: h.log,
|
||||
Conn: conn,
|
||||
rec: &recorder{
|
||||
start: cl.Now(),
|
||||
clock: cl,
|
||||
failOpen: h.failOpen,
|
||||
conn: wc,
|
||||
},
|
||||
}
|
||||
|
||||
qp := h.req.URL.Query()
|
||||
ch := CastHeader{
|
||||
Version: asciicastv2,
|
||||
Timestamp: lc.rec.start.Unix(),
|
||||
Command: strings.Join(qp["command"], " "),
|
||||
SrcNode: strings.TrimSuffix(h.who.Node.Name, "."),
|
||||
SrcNodeID: h.who.Node.StableID,
|
||||
Kubernetes: &Kubernetes{
|
||||
PodName: h.pod,
|
||||
Namespace: h.ns,
|
||||
Container: strings.Join(qp["container"], " "),
|
||||
},
|
||||
}
|
||||
if !h.who.Node.IsTagged() {
|
||||
ch.SrcNodeUser = h.who.UserProfile.LoginName
|
||||
ch.SrcNodeUserID = h.who.Node.User
|
||||
} else {
|
||||
ch.SrcNodeTags = h.who.Node.Tags
|
||||
}
|
||||
lc.ch = ch
|
||||
go func() {
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err = <-errChan:
|
||||
}
|
||||
if err == nil {
|
||||
counterSessionRecordingsUploaded.Add(1)
|
||||
h.log.Info("finished uploading the recording")
|
||||
return
|
||||
}
|
||||
msg := fmt.Sprintf("connection to the session recorder errorred: %v;", err)
|
||||
if h.failOpen {
|
||||
msg += msg + "; failure mode is 'fail open'; continuing session without recording."
|
||||
h.log.Info(msg)
|
||||
return
|
||||
}
|
||||
msg += "; failure mode set to 'fail closed'; closing connection"
|
||||
h.log.Error(msg)
|
||||
lc.failed = true
|
||||
// TODO (irbekrm): write a message to the client
|
||||
if err := lc.Close(); err != nil {
|
||||
h.log.Infof("error closing recorder connections: %v", err)
|
||||
}
|
||||
return
|
||||
}()
|
||||
return lc, nil
|
||||
}
|
||||
|
||||
// CastHeader is the asciicast header to be sent to the recorder at the start of
|
||||
// the recording of a session.
|
||||
// https://docs.asciinema.org/manual/asciicast/v2/#header
|
||||
type CastHeader struct {
|
||||
// Version is the asciinema file format version.
|
||||
Version int `json:"version"`
|
||||
|
||||
// Width is the terminal width in characters.
|
||||
Width int `json:"width"`
|
||||
|
||||
// Height is the terminal height in characters.
|
||||
Height int `json:"height"`
|
||||
|
||||
// Timestamp is the unix timestamp of when the recording started.
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
|
||||
// Tailscale-specific fields: SrcNode is the full MagicDNS name of the
|
||||
// tailnet node originating the connection, without the trailing dot.
|
||||
SrcNode string `json:"srcNode"`
|
||||
|
||||
// SrcNodeID is the node ID of the tailnet node originating the connection.
|
||||
SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"`
|
||||
|
||||
// SrcNodeTags is the list of tags on the node originating the connection (if any).
|
||||
SrcNodeTags []string `json:"srcNodeTags,omitempty"`
|
||||
|
||||
// SrcNodeUserID is the user ID of the node originating the connection (if not tagged).
|
||||
SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged
|
||||
|
||||
// SrcNodeUser is the LoginName of the node originating the connection (if not tagged).
|
||||
SrcNodeUser string `json:"srcNodeUser,omitempty"`
|
||||
|
||||
Command string
|
||||
|
||||
// Kubernetes-specific fields:
|
||||
Kubernetes *Kubernetes `json:"kubernetes,omitempty"`
|
||||
}
|
||||
|
||||
// Kubernetes contains 'kubectl exec' session specific information for
|
||||
// tsrecorder.
|
||||
type Kubernetes struct {
|
||||
PodName string
|
||||
Namespace string
|
||||
Container string
|
||||
}
|
||||
|
||||
func closeConnWithWarning(conn net.Conn, msg string) error {
|
||||
b := io.NopCloser(bytes.NewBuffer([]byte(msg)))
|
||||
resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b}
|
||||
if err := resp.Write(conn); err != nil {
|
||||
return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close())
|
||||
}
|
||||
return conn.Close()
|
||||
}
|
@@ -1,111 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/tstest"
|
||||
)
|
||||
|
||||
func Test_SPDYHijacker(t *testing.T) {
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
failOpen bool
|
||||
failRecorderConnect bool // fail initial connect to the recorder
|
||||
failRecorderConnPostConnect bool // send error down the error channel
|
||||
wantsConnClosed bool
|
||||
wantsSetupErr bool
|
||||
}{
|
||||
{
|
||||
name: "setup succeeds, conn stays open",
|
||||
},
|
||||
{
|
||||
name: "setup fails, policy is to fail open, conn stays open",
|
||||
failOpen: true,
|
||||
failRecorderConnect: true,
|
||||
},
|
||||
{
|
||||
name: "setup fails, policy is to fail closed, conn is closed",
|
||||
failRecorderConnect: true,
|
||||
wantsSetupErr: true,
|
||||
wantsConnClosed: true,
|
||||
},
|
||||
{
|
||||
name: "connection fails post-initial connect, policy is to fail open, conn stays open",
|
||||
failRecorderConnPostConnect: true,
|
||||
failOpen: true,
|
||||
},
|
||||
{
|
||||
name: "connection fails post-initial connect, policy is to fail closed, conn is closed",
|
||||
failRecorderConnPostConnect: true,
|
||||
wantsConnClosed: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tc := &testConn{}
|
||||
ch := make(chan error)
|
||||
h := &spdyHijacker{
|
||||
connectToRecorder: func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) {
|
||||
if tt.failRecorderConnect {
|
||||
err = errors.New("test")
|
||||
}
|
||||
return wc, rec, ch, err
|
||||
},
|
||||
failOpen: tt.failOpen,
|
||||
who: &apitype.WhoIsResponse{Node: &tailcfg.Node{}, UserProfile: &tailcfg.UserProfile{}},
|
||||
log: zl.Sugar(),
|
||||
ts: &tsnet.Server{},
|
||||
req: &http.Request{URL: &url.URL{}},
|
||||
}
|
||||
ctx := context.Background()
|
||||
_, err := h.setUpRecording(ctx, tc)
|
||||
if (err != nil) != tt.wantsSetupErr {
|
||||
t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr)
|
||||
return
|
||||
}
|
||||
if tt.failRecorderConnPostConnect {
|
||||
select {
|
||||
case ch <- errors.New("err"):
|
||||
case <-time.After(time.Second * 15):
|
||||
t.Errorf("error from recorder conn was not read within 15 seconds")
|
||||
}
|
||||
}
|
||||
timeout := time.Second * 20
|
||||
// TODO (irbekrm): cover case where an error is received
|
||||
// over channel and the failure policy is to fail open
|
||||
// (test that connection remains open over some period
|
||||
// of time).
|
||||
if err := tstest.WaitFor(timeout, func() (err error) {
|
||||
if tt.wantsConnClosed != tc.isClosed() {
|
||||
return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.isClosed(), tt.wantsConnClosed)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Errorf("connection did not reach the desired state within %s", timeout.String())
|
||||
}
|
||||
ctx.Done()
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,194 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.uber.org/zap"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// spdyRemoteConnRecorder is a wrapper around net.Conn. It reads the bytestream
|
||||
// for a 'kubectl exec' session, sends session recording data to the configured
|
||||
// recorder and forwards the raw bytes to the original destination.
|
||||
type spdyRemoteConnRecorder struct {
|
||||
net.Conn
|
||||
// rec knows how to send data written to it to a tsrecorder instance.
|
||||
rec *recorder
|
||||
ch CastHeader
|
||||
|
||||
stdoutStreamID atomic.Uint32
|
||||
stderrStreamID atomic.Uint32
|
||||
resizeStreamID atomic.Uint32
|
||||
|
||||
wmu sync.Mutex // sequences writes
|
||||
closed bool
|
||||
failed bool
|
||||
|
||||
rmu sync.Mutex // sequences reads
|
||||
writeCastHeaderOnce sync.Once
|
||||
|
||||
zlibReqReader zlibReader
|
||||
// writeBuf is used to store data written to the connection that has not
|
||||
// yet been parsed as SPDY frames.
|
||||
writeBuf bytes.Buffer
|
||||
// readBuf is used to store data read from the connection that has not
|
||||
// yet been parsed as SPDY frames.
|
||||
readBuf bytes.Buffer
|
||||
log *zap.SugaredLogger
|
||||
}
|
||||
|
||||
// Read reads bytes from the original connection and parses them as SPDY frames.
|
||||
// If the frame is a data frame for resize stream, sends resize message to the
|
||||
// recorder. If the frame is a SYN_STREAM control frame that starts stdout,
|
||||
// stderr or resize stream, store the stream ID.
|
||||
func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) {
|
||||
c.rmu.Lock()
|
||||
defer c.rmu.Unlock()
|
||||
n, err := c.Conn.Read(b)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("error reading from connection: %w", err)
|
||||
}
|
||||
c.readBuf.Write(b[:n])
|
||||
|
||||
var sf spdyFrame
|
||||
ok, err := sf.Parse(c.readBuf.Bytes(), c.log)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error parsing data read from connection: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
// The parsed data in the buffer will be processed together with
|
||||
// the new data on the next call to Read.
|
||||
return n, nil
|
||||
}
|
||||
c.readBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame
|
||||
|
||||
if !sf.Ctrl { // data frame
|
||||
switch sf.StreamID {
|
||||
case c.resizeStreamID.Load():
|
||||
var err error
|
||||
var msg spdyResizeMsg
|
||||
if err = json.Unmarshal(sf.Payload, &msg); err != nil {
|
||||
return 0, fmt.Errorf("error umarshalling resize msg: %w", err)
|
||||
}
|
||||
c.ch.Width = msg.Width
|
||||
c.ch.Height = msg.Height
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
// We always want to parse the headers, even if we don't care about the
|
||||
// frame, as we need to advance the zlib reader otherwise we will get
|
||||
// garbage.
|
||||
header, err := sf.parseHeaders(&c.zlibReqReader, c.log)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error parsing frame headers: %w", err)
|
||||
}
|
||||
if sf.Type == SYN_STREAM {
|
||||
c.storeStreamID(sf, header)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Write forwards the raw data of the latest parsed SPDY frame to the original
|
||||
// destination. If the frame is an SPDY data frame, it also sends the payload to
|
||||
// the connected session recorder.
|
||||
func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) {
|
||||
c.wmu.Lock()
|
||||
defer c.wmu.Unlock()
|
||||
c.writeBuf.Write(b)
|
||||
|
||||
var sf spdyFrame
|
||||
ok, err := sf.Parse(c.writeBuf.Bytes(), c.log)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error parsing data: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
// The parsed data in the buffer will be processed together with
|
||||
// the new data on the next call to Write.
|
||||
return len(b), nil
|
||||
}
|
||||
c.writeBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame
|
||||
|
||||
// If this is a stdout or stderr data frame, send its payload to the
|
||||
// session recorder.
|
||||
if !sf.Ctrl {
|
||||
switch sf.StreamID {
|
||||
case c.stdoutStreamID.Load(), c.stderrStreamID.Load():
|
||||
var err error
|
||||
c.writeCastHeaderOnce.Do(func() {
|
||||
var j []byte
|
||||
j, err = json.Marshal(c.ch)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
j = append(j, '\n')
|
||||
err = c.rec.writeCastLine(j)
|
||||
if err != nil {
|
||||
c.log.Errorf("received error from recorder: %v", err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error writing CastHeader: %w", err)
|
||||
}
|
||||
if err := c.rec.Write(sf.Payload); err != nil {
|
||||
return 0, fmt.Errorf("error sending payload to session recorder: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Forward the whole frame to the original destination.
|
||||
_, err = c.Conn.Write(sf.Raw) // send to net.Conn
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
func (c *spdyRemoteConnRecorder) Close() error {
|
||||
c.wmu.Lock()
|
||||
defer c.wmu.Unlock()
|
||||
if c.closed {
|
||||
return nil
|
||||
}
|
||||
if !c.failed && c.writeBuf.Len() > 0 {
|
||||
c.Conn.Write(c.writeBuf.Bytes())
|
||||
}
|
||||
c.writeBuf.Reset()
|
||||
c.closed = true
|
||||
err := c.Conn.Close()
|
||||
c.rec.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// parseSynStream parses SYN_STREAM SPDY control frame and updates
|
||||
// spdyRemoteConnRecorder to store the newly created stream's ID if it is one of
|
||||
// the stream types we care about. Storing stream_id:stream_type mapping allows
|
||||
// us to parse received data frames (that have stream IDs) differently depening
|
||||
// on which stream they belong to (i.e send data frame payload for stdout stream
|
||||
// to session recorder).
|
||||
func (c *spdyRemoteConnRecorder) storeStreamID(sf spdyFrame, header http.Header) {
|
||||
const (
|
||||
streamTypeHeaderKey = "Streamtype"
|
||||
)
|
||||
id := binary.BigEndian.Uint32(sf.Payload[0:4])
|
||||
switch header.Get(streamTypeHeaderKey) {
|
||||
case corev1.StreamTypeStdout:
|
||||
c.stdoutStreamID.Store(id)
|
||||
case corev1.StreamTypeStderr:
|
||||
c.stderrStreamID.Store(id)
|
||||
case corev1.StreamTypeResize:
|
||||
c.resizeStreamID.Store(id)
|
||||
}
|
||||
}
|
||||
|
||||
type spdyResizeMsg struct {
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
}
|
@@ -1,326 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
// Test_Writes tests that 1 or more Write calls to spdyRemoteConnRecorder result
// in the expected data being forwarded to the original destination and to the
// session recorder.
func Test_Writes(t *testing.T) {
	var stdoutStreamID, stderrStreamID uint32 = 1, 2
	zl, err := zap.NewDevelopment()
	if err != nil {
		t.Fatal(err)
	}
	cl := tstest.NewClock(tstest.ClockOpts{})
	tests := []struct {
		name          string
		inputs        [][]byte
		wantForwarded []byte
		wantRecorded  []byte
		firstWrite    bool
		width         int
		height        int
	}{
		{
			name:          "single_write_control_frame_with_payload",
			inputs:        [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5}},
			wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5},
		},
		{
			name:          "two_writes_control_frame_with_leftover",
			inputs:        [][]byte{{0x80, 0x3, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x1, 0x5, 0x80, 0x3}},
			wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5},
		},
		{
			name:          "single_write_stdout_data_frame",
			inputs:        [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0}},
			wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0},
		},
		{
			name:          "single_write_stdout_data_frame_with_payload",
			inputs:        [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
			wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
			wantRecorded:  castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
		},
		{
			name:          "single_write_stderr_data_frame_with_payload",
			inputs:        [][]byte{{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
			wantForwarded: []byte{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
			wantRecorded:  castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
		},
		{
			name:          "single_data_frame_unknown_stream_with_payload",
			inputs:        [][]byte{{0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
			wantForwarded: []byte{0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
		},
		{
			name:          "control_frame_and_data_frame_split_across_two_writes",
			inputs:        [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
			wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
			wantRecorded:  castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
		},
		{
			name:          "single_first_write_stdout_data_frame_with_payload",
			inputs:        [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
			wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
			wantRecorded:  append(asciinemaResizeMsg(t, 10, 20), castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...),
			width:         10,
			height:        20,
			firstWrite:    true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tc := &testConn{}
			sr := &testSessionRecorder{}
			rec := &recorder{
				conn:  sr,
				clock: cl,
				start: cl.Now(),
			}

			c := &spdyRemoteConnRecorder{
				Conn: tc,
				log:  zl.Sugar(),
				rec:  rec,
				ch: CastHeader{
					Width:  tt.width,
					Height: tt.height,
				},
			}
			if !tt.firstWrite {
				// This test case does not intend to test that the cast header gets written once.
				c.writeCastHeaderOnce.Do(func() {})
			}

			c.stdoutStreamID.Store(stdoutStreamID)
			c.stderrStreamID.Store(stderrStreamID)
			for i, input := range tt.inputs {
				if _, err := c.Write(input); err != nil {
					t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err)
				}
			}

			// Assert that the expected bytes have been forwarded to the original destination.
			gotForwarded := tc.writeBuf.Bytes()
			if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) {
				t.Errorf("expected bytes not forwarded, wants\n%v\ngot\n%v", tt.wantForwarded, gotForwarded)
			}

			// Assert that the expected bytes have been forwarded to the session recorder.
			gotRecorded := sr.buf.Bytes()
			if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) {
				t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded)
			}
		})
	}
}
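// The raw byte slices in the test cases above follow the SPDY framing parsed by
// spdyRemoteConnRecorder.Write: four bytes of stream ID (control bit clear for a data
// frame), one flags byte, a 3-byte big-endian payload length, then the payload. For
// example, {0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5} is a
// 5-byte payload on stream 1 (stdout in these tests), which is why only the trailing
// five bytes are expected to reach the recorder. A hypothetical helper that builds
// such inputs (sketch only; assumes streamID < 128 and payloads shorter than 256 bytes):
func buildDataFrame(streamID byte, payload []byte) []byte {
	header := []byte{0x0, 0x0, 0x0, streamID, 0x0, 0x0, 0x0, byte(len(payload))}
	return append(header, payload...)
}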
// Test_Reads tests that 1 or more Read calls to spdyRemoteConnRecorder result in
// the expected stream IDs being stored and in terminal resize messages being
// parsed into the session's cast header.
func Test_Reads(t *testing.T) {
	zl, err := zap.NewDevelopment()
	if err != nil {
		t.Fatal(err)
	}
	cl := tstest.NewClock(tstest.ClockOpts{})
	var reader zlibReader
	resizeMsg := resizeMsgBytes(t, 10, 20)
	synStreamStdoutPayload := payload(t, map[string]string{"Streamtype": "stdout"}, SYN_STREAM, 1)
	synStreamStderrPayload := payload(t, map[string]string{"Streamtype": "stderr"}, SYN_STREAM, 2)
	synStreamResizePayload := payload(t, map[string]string{"Streamtype": "resize"}, SYN_STREAM, 3)
	syn_stream_ctrl_header := []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(synStreamStdoutPayload))}

	tests := []struct {
		name                     string
		inputs                   [][]byte
		wantStdoutStreamID       uint32
		wantStderrStreamID       uint32
		wantResizeStreamID       uint32
		wantWidth                int
		wantHeight               int
		resizeStreamIDBeforeRead uint32
	}{
		{
			name:                     "resize_data_frame_single_read",
			inputs:                   [][]byte{append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...)},
			resizeStreamIDBeforeRead: 1,
			wantWidth:                10,
			wantHeight:               20,
		},
		{
			name:                     "resize_data_frame_two_reads",
			inputs:                   [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg},
			resizeStreamIDBeforeRead: 1,
			wantWidth:                10,
			wantHeight:               20,
		},
		{
			name:               "syn_stream_ctrl_frame_stdout_single_read",
			inputs:             [][]byte{append(syn_stream_ctrl_header, synStreamStdoutPayload...)},
			wantStdoutStreamID: 1,
		},
		{
			name:               "syn_stream_ctrl_frame_stderr_single_read",
			inputs:             [][]byte{append(syn_stream_ctrl_header, synStreamStderrPayload...)},
			wantStderrStreamID: 2,
		},
		{
			name:               "syn_stream_ctrl_frame_resize_single_read",
			inputs:             [][]byte{append(syn_stream_ctrl_header, synStreamResizePayload...)},
			wantResizeStreamID: 3,
		},
		{
			name:               "syn_stream_ctrl_frame_resize_four_reads_with_leftover",
			inputs:             [][]byte{syn_stream_ctrl_header, append(synStreamResizePayload, syn_stream_ctrl_header...), append(synStreamStderrPayload, syn_stream_ctrl_header...), append(synStreamStdoutPayload, 0x0, 0x3)},
			wantStdoutStreamID: 1,
			wantStderrStreamID: 2,
			wantResizeStreamID: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tc := &testConn{}
			sr := &testSessionRecorder{}
			rec := &recorder{
				conn:  sr,
				clock: cl,
				start: cl.Now(),
			}
			c := &spdyRemoteConnRecorder{
				Conn: tc,
				log:  zl.Sugar(),
				rec:  rec,
			}
			c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead)

			for i, input := range tt.inputs {
				c.zlibReqReader = reader
				tc.readBuf.Reset()
				_, err := tc.readBuf.Write(input)
				if err != nil {
					t.Fatalf("writing bytes to test conn: %v", err)
				}
				_, err = c.Read(make([]byte, len(input)))
				if err != nil {
					t.Errorf("[%d] spdyRemoteConnRecorder.Read() resulted in an unexpected error: %v", i, err)
				}
			}
			if id := c.resizeStreamID.Load(); id != tt.wantResizeStreamID && id != tt.resizeStreamIDBeforeRead {
				t.Errorf("wants resizeStreamID: %d, got %d", tt.wantResizeStreamID, id)
			}
			if id := c.stderrStreamID.Load(); id != tt.wantStderrStreamID {
				t.Errorf("wants stderrStreamID: %d, got %d", tt.wantStderrStreamID, id)
			}
			if id := c.stdoutStreamID.Load(); id != tt.wantStdoutStreamID {
				t.Errorf("wants stdoutStreamID: %d, got %d", tt.wantStdoutStreamID, id)
			}
			if tt.wantHeight != 0 || tt.wantWidth != 0 {
				if tt.wantWidth != c.ch.Width {
					t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width)
				}
				if tt.wantHeight != c.ch.Height {
					t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height)
				}
			}
		})
	}
}
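// For orientation (sketch, not part of this commit): syn_stream_ctrl_header above,
// {0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, N}, decodes as control bit 1, SPDY version 3,
// frame type 1 (SYN_STREAM), flags 0 and a 24-bit payload length N; the payload is a
// zlib-compressed header block whose "Streamtype" entry tells storeStreamID which
// stream ID to remember. A hypothetical check for such a header could look like this:
func isSynStreamHeader(h []byte) bool {
	if len(h) < 8 {
		return false
	}
	isCtrl := h[0]&0x80 != 0
	version := uint16(h[0]&0x7f)<<8 | uint16(h[1])
	frameType := uint16(h[2])<<8 | uint16(h[3])
	return isCtrl && version == 3 && frameType == 1 // 1 == SYN_STREAM
}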
func castLine(t *testing.T, p []byte, clock tstime.Clock) []byte {
	t.Helper()
	j, err := json.Marshal([]any{
		clock.Now().Sub(clock.Now()).Seconds(),
		"o",
		string(p),
	})
	if err != nil {
		t.Fatalf("error marshalling cast line: %v", err)
	}
	return append(j, '\n')
}

func resizeMsgBytes(t *testing.T, width, height int) []byte {
	t.Helper()
	bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height})
	if err != nil {
		t.Fatalf("error marshalling resizeMsg: %v", err)
	}
	return bs
}

func asciinemaResizeMsg(t *testing.T, width, height int) []byte {
	t.Helper()
	ch := CastHeader{
		Width:  width,
		Height: height,
	}
	bs, err := json.Marshal(ch)
	if err != nil {
		t.Fatalf("error marshalling CastHeader: %v", err)
	}
	return append(bs, '\n')
}
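// For reference, the helpers above emit asciinema-style lines: asciinemaResizeMsg
// produces the JSON-encoded CastHeader and castLine produces a single output event of
// the form [elapsed-seconds, "o", data]. With a 10x20 terminal and payload "hi", the
// recorded stream would look roughly like the following (illustrative only; the exact
// header fields depend on CastHeader's JSON tags):
//
//	{"width":10,"height":20}
//	[0,"o","hi"]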
type testConn struct {
	net.Conn
	// writeBuf contains whatever was sent to the conn via Write.
	writeBuf bytes.Buffer
	// readBuf contains whatever the conn returns from Read.
	readBuf bytes.Buffer

	sync.RWMutex // protects the following
	closed       bool
}

var _ net.Conn = &testConn{}

func (tc *testConn) Read(b []byte) (int, error) {
	return tc.readBuf.Read(b)
}

func (tc *testConn) Write(b []byte) (int, error) {
	return tc.writeBuf.Write(b)
}

func (tc *testConn) Close() error {
	tc.Lock()
	defer tc.Unlock()
	tc.closed = true
	return nil
}

func (tc *testConn) isClosed() bool {
	tc.Lock()
	defer tc.Unlock()
	return tc.closed
}

type testSessionRecorder struct {
	// buf holds data that was sent to the session recorder.
	buf bytes.Buffer
}

func (t *testSessionRecorder) Write(b []byte) (int, error) {
	return t.buf.Write(b)
}

func (t *testSessionRecorder) Close() error {
	t.buf.Reset()
	return nil
}
@@ -1,221 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"bytes"
	"compress/zlib"
	"io"
)

// zlibReader contains functionality to parse zlib compressed SPDY data.
// See https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10.1
type zlibReader struct {
	io.ReadCloser
	underlying io.LimitedReader // zlib compressed SPDY data
}

// Read decompresses zlibReader's underlying zlib compressed SPDY data and reads
// it into b.
func (z *zlibReader) Read(b []byte) (int, error) {
	if z.ReadCloser == nil {
		r, err := zlib.NewReaderDict(&z.underlying, spdyTxtDictionary)
		if err != nil {
			return 0, err
		}
		z.ReadCloser = r
	}
	return z.ReadCloser.Read(b)
}

// Set sets zlibReader's underlying data. b must be zlib compressed SPDY data.
func (z *zlibReader) Set(b []byte) {
	z.underlying.R = bytes.NewReader(b)
	z.underlying.N = int64(len(b))
}
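// Minimal usage sketch (hypothetical helper, not part of this commit): Set points the
// reader at one zlib-compressed SPDY header block and Read returns the decompressed
// bytes. The zlibReader is kept and reused rather than recreated per frame because
// SPDY shares a single compression context across all header blocks on a connection.
func decompressHeaderBlock(z *zlibReader, compressed []byte) ([]byte, error) {
	z.Set(compressed)
	// Header blocks are small; a generous fixed buffer keeps the sketch simple.
	buf := make([]byte, 4*len(compressed)+512)
	n, err := z.Read(buf)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:n], nil
}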
// spdyTxtDictionary is the dictionary defined in the SPDY spec.
// https://datatracker.ietf.org/doc/html/draft-mbelshe-httpbis-spdy-00#section-2.6.10.1
var spdyTxtDictionary = []byte{
	0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, // - - - - o p t i
	0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, // o n s - - - - h
	0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, // e a d - - - - p
	0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, // o s t - - - - p
	0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, // u t - - - - d e
	0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, // l e t e - - - -
	0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, // t r a c e - - -
	0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, // - a c c e p t -
	0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p
	0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // t - c h a r s e
	0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, // t - - - - a c c
	0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e p t - e n c o
	0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, // d i n g - - - -
	0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, // a c c e p t - l
	0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, // a n g u a g e -
	0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p
	0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, // t - r a n g e s
	0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, // - - - - a g e -
	0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, // - - - a l l o w
	0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, // - - - - a u t h
	0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, // o r i z a t i o
	0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, // n - - - - c a c
	0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, // h e - c o n t r
	0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, // o l - - - - c o
	0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, // n n e c t i o n
	0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t
	0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, // e n t - b a s e
	0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t
	0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e n t - e n c o
	0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, // d i n g - - - -
	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, // c o n t e n t -
	0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, // l a n g u a g e
	0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t
	0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, // e n t - l e n g
	0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, // t h - - - - c o
	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, // n t e n t - l o
	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, // c a t i o n - -
	0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n
	0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, // t - m d 5 - - -
	0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, // - c o n t e n t
	0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, // - r a n g e - -
	0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n
	0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, // t - t y p e - -
	0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, // - - d a t e - -
	0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, // - - e t a g - -
	0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, // - - e x p e c t
	0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, // - - - - e x p i
	0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, // r e s - - - - f
	0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, // r o m - - - - h
	0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, // o s t - - - - i
	0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, // f - m a t c h -
	0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, // - - - i f - m o
	0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, // d i f i e d - s
	0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, // i n c e - - - -
	0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, // i f - n o n e -
	0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, // m a t c h - - -
	0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, // - i f - r a n g
	0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, // e - - - - i f -
	0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, // u n m o d i f i
	0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, // e d - s i n c e
	0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, // - - - - l a s t
	0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, // - m o d i f i e
	0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, // d - - - - l o c
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, // a t i o n - - -
	0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, // - m a x - f o r
	0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, // w a r d s - - -
	0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, // - p r a g m a -
	0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, // - - - p r o x y
	0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, // - a u t h e n t
	0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, // i c a t e - - -
	0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, // - p r o x y - a
	0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, // u t h o r i z a
	0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, // t i o n - - - -
	0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, // r a n g e - - -
	0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, // - r e f e r e r
	0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, // - - - - r e t r
	0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, // y - a f t e r -
	0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, // - - - s e r v e
	0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, // r - - - - t e -
	0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, // - - - t r a i l
	0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, // e r - - - - t r
	0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, // a n s f e r - e
	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, // n c o d i n g -
	0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, // - - - u p g r a
	0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, // d e - - - - u s
	0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, // e r - a g e n t
	0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, // - - - - v a r y
	0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, // - - - - v i a -
	0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, // - - - w a r n i
	0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, // n g - - - - w w
	0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, // w - a u t h e n
	0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, // t i c a t e - -
	0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, // - - m e t h o d
	0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, // - - - - g e t -
	0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, // - - - s t a t u
	0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, // s - - - - 2 0 0
	0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, // - O K - - - - v
	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, // e r s i o n - -
	0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, // - - H T T P - 1
	0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, // - 1 - - - - u r
	0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, // l - - - - p u b
	0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, // l i c - - - - s
	0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, // e t - c o o k i
	0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, // e - - - - k e e
	0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, // p - a l i v e -
	0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, // - - - o r i g i
	0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, // n 1 0 0 1 0 1 2
	0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, // 0 1 2 0 2 2 0 5
	0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, // 2 0 6 3 0 0 3 0
	0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, // 2 3 0 3 3 0 4 3
	0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, // 0 5 3 0 6 3 0 7
	0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, // 4 0 2 4 0 5 4 0
	0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, // 6 4 0 7 4 0 8 4
	0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, // 0 9 4 1 0 4 1 1
	0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, // 4 1 2 4 1 3 4 1
	0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, // 4 4 1 5 4 1 6 4
	0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, // 1 7 5 0 2 5 0 4
	0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, // 5 0 5 2 0 3 - N
	0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, // o n - A u t h o
	0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, // r i t a t i v e
	0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, // - I n f o r m a
	0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, // t i o n 2 0 4 -
	0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, // N o - C o n t e
	0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, // n t 3 0 1 - M o
	0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, // v e d - P e r m
	0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, // a n e n t l y 4
	0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, // 0 0 - B a d - R
	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, // e q u e s t 4 0
	0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, // 1 - U n a u t h
	0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, // o r i z e d 4 0
	0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, // 3 - F o r b i d
	0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, // d e n 4 0 4 - N
	0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, // o t - F o u n d
	0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, // 5 0 0 - I n t e
	0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, // r n a l - S e r
	0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, // v e r - E r r o
	0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, // r 5 0 1 - N o t
	0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, // - I m p l e m e
	0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, // n t e d 5 0 3 -
	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, // S e r v i c e -
	0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, // U n a v a i l a
	0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, // b l e J a n - F
	0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, // e b - M a r - A
	0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, // p r - M a y - J
	0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, // u n - J u l - A
	0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, // u g - S e p t -
	0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, // O c t - N o v -
	0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, // D e c - 0 0 - 0
	0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, // 0 - 0 0 - M o n
	0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, // - - T u e - - W
	0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, // e d - - T h u -
	0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, // - F r i - - S a
	0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, // t - - S u n - -
	0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, // G M T c h u n k
	0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, // e d - t e x t -
	0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, // h t m l - i m a
	0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, // g e - p n g - i
	0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, // m a g e - j p g
	0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, // - i m a g e - g
	0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // i f - a p p l i
	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x
	0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // m l - a p p l i
	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x
	0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, // h t m l - x m l
	0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, // - t e x t - p l
	0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, // a i n - t e x t
	0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, // - j a v a s c r
	0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, // i p t - p u b l
	0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, // i c p r i v a t
	0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, // e m a x - a g e
	0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, // - g z i p - d e
	0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, // f l a t e - s d
	0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // c h c h a r s e
	0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, // t - u t f - 8 c
	0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, // h a r s e t - i
	0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, // s o - 8 8 5 9 -
	0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, // 1 - u t f - - -
	0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, // - e n q - 0 -
}
@@ -330,6 +330,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/posture from tailscale.com/ipn/ipnlocal
tailscale.com/proxymap from tailscale.com/tsd+
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh
LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled
tailscale.com/syncs from tailscale.com/cmd/tailscaled+
tailscale.com/tailcfg from tailscale.com/client/tailscale+