derp/xdp,cmd/xdpderper: initial skeleton (#12390)

This commit introduces a userspace program for managing an experimental
eBPF XDP STUN server program. derp/xdp contains the eBPF pseudo-C along
with a Go pkg for loading it and exporting its metrics.
cmd/xdpderper is a package main user of derp/xdp.

Updates tailscale/corp#20689

Signed-off-by: Jordan Whited <jordan@tailscale.com>
Jordan Whited, 2024-06-14 08:45:24 -07:00
parent 6908fb0de3
commit 65888d95c9
22 changed files with 8684 additions and 11 deletions


@@ -194,7 +194,7 @@ jobs:
- name: chown
run: chown -R $(id -u):$(id -g) $PWD
- name: privileged tests
-        run: ./tool/go test ./util/linuxfw
+        run: ./tool/go test ./util/linuxfw ./derp/xdp
vm:
runs-on: ["self-hosted", "linux", "vm"]
@@ -480,7 +480,7 @@ jobs:
uses: actions/checkout@v4
- name: check that 'go generate' is clean
run: |
-          pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator')
+          pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp')
./tool/go generate $pkgs
echo
echo

cmd/xdpderper (new file)

@@ -0,0 +1,76 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"flag"
"log"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"github.com/prometheus/client_golang/prometheus"
"tailscale.com/derp/xdp"
"tailscale.com/tsweb"
)
var (
flagDevice = flag.String("device", "", "target device name")
flagPort = flag.Int("dst-port", 0, "destination UDP port to serve")
flagVerbose = flag.Bool("verbose", false, "verbose output including verifier errors")
flagMode = flag.String("mode", "xdp", "XDP mode; valid modes: [xdp, xdpgeneric, xdpdrv, xdpoffload]")
flagHTTP = flag.String("http", ":8230", "HTTP listen address")
)
func main() {
flag.Parse()
var attachFlags xdp.XDPAttachFlags
switch strings.ToLower(*flagMode) {
case "xdp":
attachFlags = 0
case "xdpgeneric":
attachFlags = xdp.XDPGenericMode
case "xdpdrv":
attachFlags = xdp.XDPDriverMode
case "xdpoffload":
attachFlags = xdp.XDPOffloadMode
default:
log.Fatal("invalid mode")
}
server, err := xdp.NewSTUNServer(&xdp.STUNServerConfig{
DeviceName: *flagDevice,
DstPort: *flagPort,
AttachFlags: attachFlags,
FullVerifierErr: *flagVerbose,
})
if err != nil {
log.Fatalf("failed to init XDP STUN server: %v", err)
}
defer server.Close()
err = prometheus.Register(server)
if err != nil {
log.Fatalf("failed to register XDP STUN server as a prometheus collector: %v", err)
}
log.Println("XDP STUN server started")
mux := http.NewServeMux()
tsweb.Debugger(mux)
errCh := make(chan error, 1)
go func() {
err := http.ListenAndServe(*flagHTTP, mux)
errCh <- err
}()
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
select {
case err := <-errCh:
log.Printf("HTTP serve err: %v", err)
case sig := <-sigCh:
log.Printf("received signal: %s", sig)
}
}

derp/xdp/bpf_bpfeb.go (new file)

@@ -0,0 +1,166 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build mips || mips64 || ppc64 || s390x
package xdp
import (
"bytes"
_ "embed"
"fmt"
"io"
"github.com/cilium/ebpf"
)
type bpfConfig struct{ DstPort uint16 }
type bpfCounterKeyAf uint32
const (
bpfCounterKeyAfCOUNTER_KEY_AF_UNKNOWN bpfCounterKeyAf = 0
bpfCounterKeyAfCOUNTER_KEY_AF_IPV4 bpfCounterKeyAf = 1
bpfCounterKeyAfCOUNTER_KEY_AF_IPV6 bpfCounterKeyAf = 2
bpfCounterKeyAfCOUNTER_KEY_AF_LEN bpfCounterKeyAf = 3
)
type bpfCounterKeyPacketsBytesAction uint32
const (
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL bpfCounterKeyPacketsBytesAction = 0
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL bpfCounterKeyPacketsBytesAction = 1
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_ABORTED_TOTAL bpfCounterKeyPacketsBytesAction = 2
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_ABORTED_TOTAL bpfCounterKeyPacketsBytesAction = 3
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL bpfCounterKeyPacketsBytesAction = 4
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL bpfCounterKeyPacketsBytesAction = 5
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_DROP_TOTAL bpfCounterKeyPacketsBytesAction = 6
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_DROP_TOTAL bpfCounterKeyPacketsBytesAction = 7
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN bpfCounterKeyPacketsBytesAction = 8
)
type bpfCounterKeyProgEnd uint32
const (
bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED bpfCounterKeyProgEnd = 0
bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR bpfCounterKeyProgEnd = 1
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM bpfCounterKeyProgEnd = 2
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_IP_CSUM bpfCounterKeyProgEnd = 3
bpfCounterKeyProgEndCOUNTER_KEY_END_NOT_STUN_PORT bpfCounterKeyProgEnd = 4
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL bpfCounterKeyProgEnd = 5
bpfCounterKeyProgEndCOUNTER_KEY_END_LEN bpfCounterKeyProgEnd = 6
)
type bpfCountersKey struct {
Unused uint8
Af uint8
Pba uint8
ProgEnd uint8
}
// loadBpf returns the embedded CollectionSpec for bpf.
func loadBpf() (*ebpf.CollectionSpec, error) {
reader := bytes.NewReader(_BpfBytes)
spec, err := ebpf.LoadCollectionSpecFromReader(reader)
if err != nil {
return nil, fmt.Errorf("can't load bpf: %w", err)
}
return spec, err
}
// loadBpfObjects loads bpf and converts it into a struct.
//
// The following types are suitable as obj argument:
//
// *bpfObjects
// *bpfPrograms
// *bpfMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
spec, err := loadBpf()
if err != nil {
return err
}
return spec.LoadAndAssign(obj, opts)
}
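// An illustrative usage sketch (editorial note, not part of the generated
// file), assuming sufficient privileges (CAP_BPF/CAP_NET_ADMIN or root):
//
//	var objs bpfObjects
//	if err := loadBpfObjects(&objs, nil); err != nil {
//		// inspect err for verifier or permission failures
//	}
//	defer objs.Close()
//	// objs.XdpProgFunc and objs.ConfigMap now refer to live kernel objects.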
// bpfSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfSpecs struct {
bpfProgramSpecs
bpfMapSpecs
}
// bpfProgramSpecs contains programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfProgramSpecs struct {
XdpProgFunc *ebpf.ProgramSpec `ebpf:"xdp_prog_func"`
}
// bpfMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfMapSpecs struct {
ConfigMap *ebpf.MapSpec `ebpf:"config_map"`
CountersMap *ebpf.MapSpec `ebpf:"counters_map"`
}
// bpfObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfObjects struct {
bpfPrograms
bpfMaps
}
func (o *bpfObjects) Close() error {
return _BpfClose(
&o.bpfPrograms,
&o.bpfMaps,
)
}
// bpfMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfMaps struct {
ConfigMap *ebpf.Map `ebpf:"config_map"`
CountersMap *ebpf.Map `ebpf:"counters_map"`
}
func (m *bpfMaps) Close() error {
return _BpfClose(
m.ConfigMap,
m.CountersMap,
)
}
// bpfPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfPrograms struct {
XdpProgFunc *ebpf.Program `ebpf:"xdp_prog_func"`
}
func (p *bpfPrograms) Close() error {
return _BpfClose(
p.XdpProgFunc,
)
}
func _BpfClose(closers ...io.Closer) error {
for _, closer := range closers {
if err := closer.Close(); err != nil {
return err
}
}
return nil
}
// Do not access this directly.
//
//go:embed bpf_bpfeb.o
var _BpfBytes []byte

derp/xdp/bpf_bpfeb.o (new binary file, not shown)

derp/xdp/bpf_bpfel.go (new file)

@@ -0,0 +1,166 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build 386 || amd64 || arm || arm64 || loong64 || mips64le || mipsle || ppc64le || riscv64
package xdp
import (
"bytes"
_ "embed"
"fmt"
"io"
"github.com/cilium/ebpf"
)
type bpfConfig struct{ DstPort uint16 }
type bpfCounterKeyAf uint32
const (
bpfCounterKeyAfCOUNTER_KEY_AF_UNKNOWN bpfCounterKeyAf = 0
bpfCounterKeyAfCOUNTER_KEY_AF_IPV4 bpfCounterKeyAf = 1
bpfCounterKeyAfCOUNTER_KEY_AF_IPV6 bpfCounterKeyAf = 2
bpfCounterKeyAfCOUNTER_KEY_AF_LEN bpfCounterKeyAf = 3
)
type bpfCounterKeyPacketsBytesAction uint32
const (
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL bpfCounterKeyPacketsBytesAction = 0
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL bpfCounterKeyPacketsBytesAction = 1
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_ABORTED_TOTAL bpfCounterKeyPacketsBytesAction = 2
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_ABORTED_TOTAL bpfCounterKeyPacketsBytesAction = 3
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL bpfCounterKeyPacketsBytesAction = 4
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL bpfCounterKeyPacketsBytesAction = 5
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_DROP_TOTAL bpfCounterKeyPacketsBytesAction = 6
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_DROP_TOTAL bpfCounterKeyPacketsBytesAction = 7
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN bpfCounterKeyPacketsBytesAction = 8
)
type bpfCounterKeyProgEnd uint32
const (
bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED bpfCounterKeyProgEnd = 0
bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR bpfCounterKeyProgEnd = 1
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM bpfCounterKeyProgEnd = 2
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_IP_CSUM bpfCounterKeyProgEnd = 3
bpfCounterKeyProgEndCOUNTER_KEY_END_NOT_STUN_PORT bpfCounterKeyProgEnd = 4
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL bpfCounterKeyProgEnd = 5
bpfCounterKeyProgEndCOUNTER_KEY_END_LEN bpfCounterKeyProgEnd = 6
)
type bpfCountersKey struct {
Unused uint8
Af uint8
Pba uint8
ProgEnd uint8
}
// loadBpf returns the embedded CollectionSpec for bpf.
func loadBpf() (*ebpf.CollectionSpec, error) {
reader := bytes.NewReader(_BpfBytes)
spec, err := ebpf.LoadCollectionSpecFromReader(reader)
if err != nil {
return nil, fmt.Errorf("can't load bpf: %w", err)
}
return spec, err
}
// loadBpfObjects loads bpf and converts it into a struct.
//
// The following types are suitable as obj argument:
//
// *bpfObjects
// *bpfPrograms
// *bpfMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
spec, err := loadBpf()
if err != nil {
return err
}
return spec.LoadAndAssign(obj, opts)
}
// bpfSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfSpecs struct {
bpfProgramSpecs
bpfMapSpecs
}
// bpfProgramSpecs contains programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfProgramSpecs struct {
XdpProgFunc *ebpf.ProgramSpec `ebpf:"xdp_prog_func"`
}
// bpfMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type bpfMapSpecs struct {
ConfigMap *ebpf.MapSpec `ebpf:"config_map"`
CountersMap *ebpf.MapSpec `ebpf:"counters_map"`
}
// bpfObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfObjects struct {
bpfPrograms
bpfMaps
}
func (o *bpfObjects) Close() error {
return _BpfClose(
&o.bpfPrograms,
&o.bpfMaps,
)
}
// bpfMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfMaps struct {
ConfigMap *ebpf.Map `ebpf:"config_map"`
CountersMap *ebpf.Map `ebpf:"counters_map"`
}
func (m *bpfMaps) Close() error {
return _BpfClose(
m.ConfigMap,
m.CountersMap,
)
}
// bpfPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfPrograms struct {
XdpProgFunc *ebpf.Program `ebpf:"xdp_prog_func"`
}
func (p *bpfPrograms) Close() error {
return _BpfClose(
p.XdpProgFunc,
)
}
func _BpfClose(closers ...io.Closer) error {
for _, closer := range closers {
if err := closer.Close(); err != nil {
return err
}
}
return nil
}
// Do not access this directly.
//
//go:embed bpf_bpfel.o
var _BpfBytes []byte

derp/xdp/bpf_bpfel.o (new binary file, not shown)

LICENSE.BSD-2-Clause (vendored libbpf license text)

@@ -0,0 +1,32 @@
Valid-License-Identifier: BSD-2-Clause
SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html
Usage-Guide:
To use the BSD 2-clause "Simplified" License put the following SPDX
tag/value pair into a comment according to the placement guidelines in
the licensing rules documentation:
SPDX-License-Identifier: BSD-2-Clause
License-Text:
Copyright (c) 2015 The Libbpf Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

bpf_endian.h (vendored libbpf header, new file)

@@ -0,0 +1,99 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__
/*
* Isolate byte #n and put it into byte #m, for __u##b type.
* E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
* 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
* 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
* 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
* 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
*/
#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))
#define ___bpf_swab16(x) ((__u16)( \
___bpf_mvb(x, 16, 0, 1) | \
___bpf_mvb(x, 16, 1, 0)))
#define ___bpf_swab32(x) ((__u32)( \
___bpf_mvb(x, 32, 0, 3) | \
___bpf_mvb(x, 32, 1, 2) | \
___bpf_mvb(x, 32, 2, 1) | \
___bpf_mvb(x, 32, 3, 0)))
#define ___bpf_swab64(x) ((__u64)( \
___bpf_mvb(x, 64, 0, 7) | \
___bpf_mvb(x, 64, 1, 6) | \
___bpf_mvb(x, 64, 2, 5) | \
___bpf_mvb(x, 64, 3, 4) | \
___bpf_mvb(x, 64, 4, 3) | \
___bpf_mvb(x, 64, 5, 2) | \
___bpf_mvb(x, 64, 6, 1) | \
___bpf_mvb(x, 64, 7, 0)))
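/*
 * Worked example (editorial illustration, not part of the upstream header):
 * for x = 0x1234, ___bpf_swab16(x) is two byte moves,
 *   ___bpf_mvb(x, 16, 0, 1) == 0x3400   (byte #0, 0x34, moved to byte #1)
 *   ___bpf_mvb(x, 16, 1, 0) == 0x0012   (byte #1, 0x12, moved to byte #0)
 * and ORing them yields 0x3412, which can be checked at compile time:
 *
 *	_Static_assert(___bpf_swab16(0x1234) == 0x3412, "16-bit byte swap");
 */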
/* LLVM's BPF target selects the endianness of the CPU
* it compiles on, or the user specifies (bpfel/bpfeb),
* respectively. The used __BYTE_ORDER__ is defined by
* the compiler, we cannot rely on __BYTE_ORDER from
* libc headers, since it doesn't reflect the actual
* requested byte order.
*
* Note, LLVM's BPF target has different __builtin_bswapX()
* semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
* in bpfel and bpfeb case, which means below, that we map
* to cpu_to_be16(). We could use it unconditionally in BPF
* case, but better not rely on it, so that this header here
* can be used from application and BPF program side, which
* use different targets.
*/
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x) __builtin_bswap16(x)
# define __bpf_htons(x) __builtin_bswap16(x)
# define __bpf_constant_ntohs(x) ___bpf_swab16(x)
# define __bpf_constant_htons(x) ___bpf_swab16(x)
# define __bpf_ntohl(x) __builtin_bswap32(x)
# define __bpf_htonl(x) __builtin_bswap32(x)
# define __bpf_constant_ntohl(x) ___bpf_swab32(x)
# define __bpf_constant_htonl(x) ___bpf_swab32(x)
# define __bpf_be64_to_cpu(x) __builtin_bswap64(x)
# define __bpf_cpu_to_be64(x) __builtin_bswap64(x)
# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x)
# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x) (x)
# define __bpf_htons(x) (x)
# define __bpf_constant_ntohs(x) (x)
# define __bpf_constant_htons(x) (x)
# define __bpf_ntohl(x) (x)
# define __bpf_htonl(x) (x)
# define __bpf_constant_ntohl(x) (x)
# define __bpf_constant_htonl(x) (x)
# define __bpf_be64_to_cpu(x) (x)
# define __bpf_cpu_to_be64(x) (x)
# define __bpf_constant_be64_to_cpu(x) (x)
# define __bpf_constant_cpu_to_be64(x) (x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
#define bpf_htons(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_ntohl(x) : __bpf_ntohl(x))
#define bpf_cpu_to_be64(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
#define bpf_be64_to_cpu(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))
#endif /* __BPF_ENDIAN__ */
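A minimal usage sketch (editorial; assumes a vmlinux.h-based build, with
ETH_P_IP defined locally because vmlinux.h programs cannot pull in system
headers): since bpf_htons() folds to a compile-time constant for constant
arguments, it is the idiomatic way to compare wire-format fields, e.g. in an
XDP Ethernet parser:

#include "vmlinux.h"
#include "bpf_helpers.h"
#include "bpf_endian.h"

#define ETH_P_IP 0x0800 /* normally from linux/if_ether.h */

SEC("xdp")
int count_ipv4(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto == bpf_htons(ETH_P_IP)) /* constant-folded swap */
		bpf_printk("IPv4 packet");
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";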

(File diff suppressed because it is too large.)

bpf_helpers.h (vendored libbpf header, new file)

@@ -0,0 +1,410 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__
/*
* Note that bpf programs need to include either
* vmlinux.h (auto-generated from BTF) or linux/types.h
* in advance since bpf_helper_defs.h uses such types
* as __u64.
*/
#include "bpf_helper_defs.h"
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
* are interpreted by libbpf depending on the context (BPF programs, BPF maps,
* extern variables, etc).
* To allow use of SEC() with externs (e.g., for extern .maps declarations),
* make sure __attribute__((unused)) doesn't trigger compilation warning.
*/
#if __GNUC__ && !__clang__
/*
* Pragma macros are broken on GCC
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
*/
#define SEC(name) __attribute__((section(name), used))
#else
#define SEC(name) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
__attribute__((section(name), used)) \
_Pragma("GCC diagnostic pop") \
#endif
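/*
 * Illustrative sketch (editorial, not part of the upstream header): SEC()
 * places objects in the ELF sections libbpf interprets, e.g. a map in
 * ".maps", a program in a section named for its program type, and the
 * license string:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} hits SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_pass(struct xdp_md *ctx) { return XDP_PASS; }
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */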
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif
/*
* Use __hidden attribute to mark a non-static BPF subprogram effectively
* static for BPF verifier's verification algorithm purposes, allowing more
* extensive and permissive BPF verification process, taking into account
* subprogram's caller context.
*/
#define __hidden __attribute__((visibility("hidden")))
/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
* any system-level headers (such as stddef.h, linux/version.h, etc), and
* commonly-used macros like NULL and KERNEL_VERSION aren't available through
* vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
* them on their own. So as a convenience, provide such definitions here.
*/
#ifndef NULL
#define NULL ((void *)0)
#endif
#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif
/*
* Helper macros to manipulate data structures
*/
/* offsetof() definition that uses __builtin_offset() might not preserve field
* offset CO-RE relocation properly, so force-redefine offsetof() using
* old-school approach which works with CO-RE correctly
*/
#undef offsetof
#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
/* redefined container_of() to ensure we use the above offsetof() macro */
#undef container_of
#define container_of(ptr, type, member) \
({ \
void *__mptr = (void *)(ptr); \
((type *)(__mptr - offsetof(type, member))); \
})
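/*
 * Illustrative sketch (editorial, not part of the upstream header):
 * container_of() recovers the enclosing struct from a pointer to one of its
 * members, here with a hypothetical layout:
 *
 *	struct flow {
 *		__u64 bytes;
 *		struct list_head node;
 *	};
 *
 *	struct list_head *lh = ...;
 *	struct flow *f = container_of(lh, struct flow, node);
 */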
/*
* Compiler (optimization) barrier.
*/
#ifndef barrier
#define barrier() asm volatile("" ::: "memory")
#endif
/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
* compiler believe that there is some black box modification of a given
* variable and thus prevents compiler from making extra assumption about its
* value and potential simplifications and optimizations on this variable.
*
* E.g., compiler might often delay or even omit 32-bit to 64-bit casting of
* a variable, making some code patterns unverifiable. Putting barrier_var()
* in place will ensure that cast is performed before the barrier_var()
* invocation, because compiler has to pessimistically assume that embedded
* asm section might perform some extra operations on that variable.
*
* This is a variable-specific variant of more global barrier().
*/
#ifndef barrier_var
#define barrier_var(var) asm volatile("" : "+r"(var))
#endif
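/*
 * Illustrative sketch (editorial, not part of the upstream header): a common
 * pattern is forcing a computed offset to materialize as a 32-bit value
 * before a bounds check, so the verifier tracks the checked value itself
 * (compute_offset() and MAX_OFF are hypothetical):
 *
 *	__u32 off = compute_offset(ctx);
 *	barrier_var(off);
 *	if (off > MAX_OFF)
 *		return XDP_ABORTED;
 *	use(data + off);
 */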
/*
* Helper macro to throw a compilation error if __bpf_unreachable() gets
* built into the resulting code. This works given BPF back end does not
* implement __builtin_trap(). This is useful to assert that certain paths
* of the program code are never used and hence eliminated by the compiler.
*
* For example, consider a switch statement that covers known cases used by
* the program. __bpf_unreachable() can then reside in the default case. If
* the program gets extended such that a case is not covered in the switch
* statement, then it will throw a build error due to the default case not
* being compiled out.
*/
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif
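/*
 * Illustrative sketch of the pattern described above (editorial, not part of
 * the upstream header): assuming the compiler can prove 'kind' only takes
 * the handled values (e.g. it was masked or range-checked earlier), the
 * default branch is eliminated; if a new value later becomes reachable, the
 * retained __builtin_trap() fails the build:
 *
 *	switch (kind) {
 *	case KIND_A: return handle_a(ctx);
 *	case KIND_B: return handle_b(ctx);
 *	default: __bpf_unreachable();
 *	}
 */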
/*
* Helper function to perform a tail call with a constant/immediate map slot.
*/
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
if (!__builtin_constant_p(slot))
__bpf_unreachable();
/*
* Provide a hard guarantee that LLVM won't optimize setting r2 (map
* pointer) and r3 (constant map index) from _different paths_ ending
* up at the _same_ call insn as otherwise we won't be able to use the
* jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
* given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
* tracking for prog array pokes") for details on verifier tracking.
*
* Note on clobber list: we need to stay in-line with BPF calling
* convention, so even if we don't end up using r0, r4, r5, we need
* to mark them as clobber so that LLVM doesn't end up using them
* before / after the call.
*/
asm volatile("r1 = %[ctx]\n\t"
"r2 = %[map]\n\t"
"r3 = %[slot]\n\t"
"call 12"
:: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
: "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
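/*
 * Illustrative usage sketch (editorial, not part of the upstream header):
 * the slot argument must be a compile-time constant, e.g. when dispatching
 * into a program array:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatch(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call_static(ctx, &jmp_table, 0);
 *		return XDP_PASS;
 *	}
 *
 * Control only falls through to XDP_PASS if the tail call fails (e.g. the
 * slot is empty).
 */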
enum libbpf_pin_type {
LIBBPF_PIN_NONE,
/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
LIBBPF_PIN_BY_NAME,
};
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
TRI_MODULE = 2,
};
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
#define bpf_ksym_exists(sym) ({ \
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
!!sym; \
})
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif
#define ___bpf_fill0(arr, p, x) do {} while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
/*
* BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
* in a structure.
*/
#define BPF_SEQ_PRINTF(seq, fmt, args...) \
({ \
static const char ___fmt[] = fmt; \
unsigned long long ___param[___bpf_narg(args)]; \
\
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
___bpf_fill(___param, args); \
_Pragma("GCC diagnostic pop") \
\
bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
___param, sizeof(___param)); \
})
/*
* BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
* an array of u64.
*/
#define BPF_SNPRINTF(out, out_size, fmt, args...) \
({ \
static const char ___fmt[] = fmt; \
unsigned long long ___param[___bpf_narg(args)]; \
\
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
___bpf_fill(___param, args); \
_Pragma("GCC diagnostic pop") \
\
bpf_snprintf(out, out_size, ___fmt, \
___param, sizeof(___param)); \
})
#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif
#define __bpf_printk(fmt, ...) \
({ \
BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
/*
* __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
* instead of an array of u64.
*/
#define __bpf_vprintk(fmt, args...) \
({ \
static const char ___fmt[] = fmt; \
unsigned long long ___param[___bpf_narg(args)]; \
\
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
___bpf_fill(___param, args); \
_Pragma("GCC diagnostic pop") \
\
bpf_trace_vprintk(___fmt, sizeof(___fmt), \
___param, sizeof(___param)); \
})
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
* Otherwise use __bpf_vprintk
*/
#define ___bpf_pick_printk(...) \
___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
__bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
__bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
__bpf_printk /*1*/, __bpf_printk /*0*/)
/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
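/*
 * Illustrative sketch (editorial, not part of the upstream header): the
 * dispatch is transparent to callers; both calls below are written as
 * bpf_printk(), but the second exceeds 3 format args and so routes to
 * __bpf_vprintk():
 *
 *	bpf_printk("rx on ifindex %d", ifindex);
 *	bpf_printk("counts: %d %d %d %d", a, b, c, d);
 */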
struct bpf_iter_num;
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
#ifndef bpf_for_each
/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
* using BPF open-coded iterators without having to write mundane explicit
* low-level loop logic. Instead, it provides for()-like generic construct
* that can be used pretty naturally. E.g., for some hypothetical cgroup
* iterator, you'd write:
*
* struct cgroup *cg, *parent_cg = <...>;
*
* bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
* bpf_printk("Child cgroup id = %d", cg->cgroup_id);
* if (cg->cgroup_id == 123)
* break;
* }
*
* I.e., it looks almost like high-level for each loop in other languages,
* supports continue/break, and is verifiable by BPF verifier.
*
 * For iterating integers, the difference between bpf_for_each(num, i, N, M)
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
* *`, not just `int`. So for integers bpf_for() is more convenient.
*
* Note: this macro relies on C99 feature of allowing to declare variables
* inside for() loop, bound to for() loop lifetime. It also utilizes GCC
* extension: __attribute__((cleanup(<func>))), supported by both GCC and
* Clang.
*/
#define bpf_for_each(type, cur, args...) for ( \
/* initialize and define destructor */ \
struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_##type##_destroy))), \
/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
*___p __attribute__((unused)) = ( \
bpf_iter_##type##_new(&___it, ##args), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_##type##_destroy, (void *)0); \
/* iteration and termination check */ \
(((cur) = bpf_iter_##type##_next(&___it))); \
)
#endif /* bpf_for_each */
#ifndef bpf_for
/* bpf_for(i, start, end) implements a for()-like looping construct that sets
* provided integer variable *i* to values starting from *start* through,
* but not including, *end*. It also proves to BPF verifier that *i* belongs
* to range [start, end), so this can be used for accessing arrays without
* extra checks.
*
* Note: *start* and *end* are assumed to be expressions with no side effects
* and whose values do not change throughout bpf_for() loop execution. They do
* not have to be statically known or constant, though.
*
* Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
* loop bound variables and cleanup attribute, supported by GCC and Clang.
*/
#define bpf_for(i, start, end) for ( \
/* initialize and define destructor */ \
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p __attribute__((unused)) = ( \
bpf_iter_num_new(&___it, (start), (end)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \
({ \
/* iteration step */ \
int *___t = bpf_iter_num_next(&___it); \
/* termination and bounds check */ \
(___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
}); \
)
#endif /* bpf_for */
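/*
 * Illustrative sketch (editorial, not part of the upstream header): because
 * bpf_for() proves i is in [start, end), array accesses in the body need no
 * additional bounds checks:
 *
 *	__u64 vals[64] = {};
 *	__u64 sum = 0;
 *	int i;
 *
 *	bpf_for(i, 0, 64) {
 *		sum += vals[i];
 *	}
 */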
#ifndef bpf_repeat
/* bpf_repeat(N) performs N iterations without exposing iteration number
*
* Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
* loop bound variables and cleanup attribute, supported by GCC and Clang.
*/
#define bpf_repeat(N) for ( \
/* initialize and define destructor */ \
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p __attribute__((unused)) = ( \
bpf_iter_num_new(&___it, 0, (N)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \
bpf_iter_num_next(&___it); \
/* nothing here */ \
)
#endif /* bpf_repeat */
#endif

bpf_tracing.h (vendored libbpf header, new file)

@@ -0,0 +1,922 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__
#include "bpf_helpers.h"
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arc)
#define bpf_target_arc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_loongarch)
#define bpf_target_loongarch
#define bpf_target_defined
#else
/* Fall back to what the compiler says */
#if defined(__x86_64__)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__s390__)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__arm__)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__aarch64__)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__mips__)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__powerpc__)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__sparc__)
#define bpf_target_sparc
#define bpf_target_defined
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
#elif defined(__arc__)
#define bpf_target_arc
#define bpf_target_defined
#elif defined(__loongarch__)
#define bpf_target_loongarch
#define bpf_target_defined
#endif /* no compiler target */
#endif
#ifndef __BPF_TARGET_MISSING
#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
#endif
#if defined(bpf_target_x86)
/*
* https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI
*/
#if defined(__KERNEL__) || defined(__VMLINUX_H__)
#define __PT_PARM1_REG di
#define __PT_PARM2_REG si
#define __PT_PARM3_REG dx
#define __PT_PARM4_REG cx
#define __PT_PARM5_REG r8
#define __PT_PARM6_REG r9
/*
* Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64
* comments in Linux sources. And refer to syscall(2) manpage.
*/
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG r10
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG sp
#define __PT_FP_REG bp
#define __PT_RC_REG ax
#define __PT_SP_REG sp
#define __PT_IP_REG ip
#else
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define __PT_PARM1_REG eax
#define __PT_PARM2_REG edx
#define __PT_PARM3_REG ecx
/* i386 syscall ABI is very different, refer to syscall(2) manpage */
#define __PT_PARM1_SYSCALL_REG ebx
#define __PT_PARM2_SYSCALL_REG ecx
#define __PT_PARM3_SYSCALL_REG edx
#define __PT_PARM4_SYSCALL_REG esi
#define __PT_PARM5_SYSCALL_REG edi
#define __PT_PARM6_SYSCALL_REG ebp
#define __PT_RET_REG esp
#define __PT_FP_REG ebp
#define __PT_RC_REG eax
#define __PT_SP_REG esp
#define __PT_IP_REG eip
#else /* __i386__ */
#define __PT_PARM1_REG rdi
#define __PT_PARM2_REG rsi
#define __PT_PARM3_REG rdx
#define __PT_PARM4_REG rcx
#define __PT_PARM5_REG r8
#define __PT_PARM6_REG r9
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG r10
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG rsp
#define __PT_FP_REG rbp
#define __PT_RC_REG rax
#define __PT_SP_REG rsp
#define __PT_IP_REG rip
#endif /* __i386__ */
#endif /* __KERNEL__ || __VMLINUX_H__ */
#elif defined(bpf_target_s390)
/*
* https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf
*/
struct pt_regs___s390 {
unsigned long orig_gpr2;
};
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
#define __PT_PARM1_REG gprs[2]
#define __PT_PARM2_REG gprs[3]
#define __PT_PARM3_REG gprs[4]
#define __PT_PARM4_REG gprs[5]
#define __PT_PARM5_REG gprs[6]
#define __PT_PARM1_SYSCALL_REG orig_gpr2
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG gprs[7]
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
#define __PT_RET_REG gprs[14]
#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG gprs[2]
#define __PT_SP_REG gprs[15]
#define __PT_IP_REG psw.addr
#elif defined(bpf_target_arm)
/*
* https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers
*/
#define __PT_PARM1_REG uregs[0]
#define __PT_PARM2_REG uregs[1]
#define __PT_PARM3_REG uregs[2]
#define __PT_PARM4_REG uregs[3]
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG uregs[4]
#define __PT_PARM6_SYSCALL_REG uregs[5]
#define __PT_PARM7_SYSCALL_REG uregs[6]
#define __PT_RET_REG uregs[14]
#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG uregs[0]
#define __PT_SP_REG uregs[13]
#define __PT_IP_REG uregs[12]
#elif defined(bpf_target_arm64)
/*
* https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers
*/
struct pt_regs___arm64 {
unsigned long orig_x0;
};
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
#define __PT_PARM1_REG regs[0]
#define __PT_PARM2_REG regs[1]
#define __PT_PARM3_REG regs[2]
#define __PT_PARM4_REG regs[3]
#define __PT_PARM5_REG regs[4]
#define __PT_PARM6_REG regs[5]
#define __PT_PARM7_REG regs[6]
#define __PT_PARM8_REG regs[7]
#define __PT_PARM1_SYSCALL_REG orig_x0
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
#define __PT_RET_REG regs[30]
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[0]
#define __PT_SP_REG sp
#define __PT_IP_REG pc
#elif defined(bpf_target_mips)
/*
* N64 ABI is assumed right now.
* https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions
*/
#define __PT_PARM1_REG regs[4]
#define __PT_PARM2_REG regs[5]
#define __PT_PARM3_REG regs[6]
#define __PT_PARM4_REG regs[7]
#define __PT_PARM5_REG regs[8]
#define __PT_PARM6_REG regs[9]
#define __PT_PARM7_REG regs[10]
#define __PT_PARM8_REG regs[11]
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */
#define __PT_RET_REG regs[31]
#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
#define __PT_RC_REG regs[2]
#define __PT_SP_REG regs[29]
#define __PT_IP_REG cp0_epc
#elif defined(bpf_target_powerpc)
/*
* http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14,
* section "Function Calling Sequence")
*/
#define __PT_PARM1_REG gpr[3]
#define __PT_PARM2_REG gpr[4]
#define __PT_PARM3_REG gpr[5]
#define __PT_PARM4_REG gpr[6]
#define __PT_PARM5_REG gpr[7]
#define __PT_PARM6_REG gpr[8]
#define __PT_PARM7_REG gpr[9]
#define __PT_PARM8_REG gpr[10]
/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
#define __PT_PARM1_SYSCALL_REG orig_gpr3
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#if !defined(__arch64__)
#define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */
#endif
#define __PT_RET_REG regs[31]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG gpr[3]
#define __PT_SP_REG sp
#define __PT_IP_REG nip
#elif defined(bpf_target_sparc)
/*
* https://en.wikipedia.org/wiki/Calling_convention#SPARC
*/
#define __PT_PARM1_REG u_regs[UREG_I0]
#define __PT_PARM2_REG u_regs[UREG_I1]
#define __PT_PARM3_REG u_regs[UREG_I2]
#define __PT_PARM4_REG u_regs[UREG_I3]
#define __PT_PARM5_REG u_regs[UREG_I4]
#define __PT_PARM6_REG u_regs[UREG_I5]
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG u_regs[UREG_I7]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG u_regs[UREG_I0]
#define __PT_SP_REG u_regs[UREG_FP]
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define __PT_IP_REG tpc
#else
#define __PT_IP_REG pc
#endif
#elif defined(bpf_target_riscv)
/*
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
#define __PT_PARM2_REG a1
#define __PT_PARM3_REG a2
#define __PT_PARM4_REG a3
#define __PT_PARM5_REG a4
#define __PT_PARM6_REG a5
#define __PT_PARM7_REG a6
#define __PT_PARM8_REG a7
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG ra
#define __PT_FP_REG s0
#define __PT_RC_REG a0
#define __PT_SP_REG sp
#define __PT_IP_REG pc
#elif defined(bpf_target_arc)
/*
* Section "Function Calling Sequence" (page 24):
* https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
*/
/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG scratch.r0
#define __PT_PARM2_REG scratch.r1
#define __PT_PARM3_REG scratch.r2
#define __PT_PARM4_REG scratch.r3
#define __PT_PARM5_REG scratch.r4
#define __PT_PARM6_REG scratch.r5
#define __PT_PARM7_REG scratch.r6
#define __PT_PARM8_REG scratch.r7
/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG scratch.blink
#define __PT_FP_REG scratch.fp
#define __PT_RC_REG scratch.r0
#define __PT_SP_REG scratch.sp
#define __PT_IP_REG scratch.ret
#elif defined(bpf_target_loongarch)
/*
* https://docs.kernel.org/loongarch/introduction.html
* https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
*/
/* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
#define __PT_PARM1_REG regs[4]
#define __PT_PARM2_REG regs[5]
#define __PT_PARM3_REG regs[6]
#define __PT_PARM4_REG regs[7]
#define __PT_PARM5_REG regs[8]
#define __PT_PARM6_REG regs[9]
#define __PT_PARM7_REG regs[10]
#define __PT_PARM8_REG regs[11]
/* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
#define __PT_RET_REG regs[1]
#define __PT_FP_REG regs[22]
#define __PT_RC_REG regs[4]
#define __PT_SP_REG regs[3]
#define __PT_IP_REG csr_era
#endif
#if defined(bpf_target_defined)
struct pt_regs;
/* allow some architectures to override `struct pt_regs` */
#ifndef __PT_REGS_CAST
#define __PT_REGS_CAST(x) (x)
#endif
/*
* Different architectures support different number of arguments passed
* through registers. i386 supports just 3, some arches support up to 8.
*/
#ifndef __PT_PARM4_REG
#define __PT_PARM4_REG __unsupported__
#endif
#ifndef __PT_PARM5_REG
#define __PT_PARM5_REG __unsupported__
#endif
#ifndef __PT_PARM6_REG
#define __PT_PARM6_REG __unsupported__
#endif
#ifndef __PT_PARM7_REG
#define __PT_PARM7_REG __unsupported__
#endif
#ifndef __PT_PARM8_REG
#define __PT_PARM8_REG __unsupported__
#endif
/*
* Similarly, syscall-specific conventions might differ between function call
 * conventions within each architecture. All supported architectures pass
* either 6 or 7 syscall arguments in registers.
*
* See syscall(2) manpage for succinct table with information on each arch.
*/
#ifndef __PT_PARM7_SYSCALL_REG
#define __PT_PARM7_SYSCALL_REG __unsupported__
#endif
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG)
#define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG)
#define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG)
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
#define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG)
#define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG)
#define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG)
#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
#if defined(bpf_target_powerpc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
#ifndef PT_REGS_PARM1_SYSCALL
#define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM2_SYSCALL
#define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG)
#define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM3_SYSCALL
#define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG)
#define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM4_SYSCALL
#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG)
#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM5_SYSCALL
#define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG)
#define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM6_SYSCALL
#define PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG)
#define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG)
#endif
#ifndef PT_REGS_PARM7_SYSCALL
#define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG)
#define PT_REGS_PARM7_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG)
#endif
#else /* defined(bpf_target_defined) */
#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
#endif /* defined(bpf_target_defined) */
/*
* When invoked from a syscall handler kprobe, returns a pointer to a
* struct pt_regs containing syscall arguments and suitable for passing to
* PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
*/
#ifndef PT_REGS_SYSCALL_REGS
/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
#endif
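/*
 * Illustrative sketch (editorial, not part of the upstream header; assumes
 * vmlinux.h and bpf_core_read.h are also included, and uses an example
 * x86-64 syscall wrapper symbol):
 *
 *	SEC("kprobe/__x64_sys_openat")
 *	int probe_openat(struct pt_regs *ctx)
 *	{
 *		struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);
 *		long dfd = PT_REGS_PARM1_CORE_SYSCALL(regs);
 *
 *		bpf_printk("openat dfd=%ld", dfd);
 *		return 0;
 *	}
 */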
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
/*
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
* similar kinds of BPF programs, that accept input arguments as a single
* pointer to untyped u64 array, where each u64 can actually be a typed
 * pointer or integer of different size. Instead of requiring user to write
* manual casts and work with array elements by index, BPF_PROG macro
* allows user to declare a list of named and typed input arguments in the
* same syntax as for normal C function. All the casting is hidden and
* performed transparently, while user code can just assume working with
* function arguments of specified type and name.
*
* Original raw context argument is preserved as well as 'ctx' argument.
* This is useful when using BPF helpers that expect original context
* as one of the parameters (e.g., for bpf_perf_event_output()).
*/
#define BPF_PROG(name, args...) \
name(unsigned long long *ctx); \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_ctx_cast(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args)
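/*
 * Illustrative usage sketch (editorial, not part of the upstream header;
 * the traced kernel function is an example): BPF_PROG lets an fentry
 * program declare typed arguments, while the raw u64 array remains
 * available as 'ctx':
 *
 *	SEC("fentry/do_unlinkat")
 *	int BPF_PROG(trace_unlinkat, int dfd, struct filename *name)
 *	{
 *		bpf_printk("unlinkat dfd=%d", dfd);
 *		return 0;
 *	}
 */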
#ifndef ___bpf_nth2
#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
_14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
#endif
#ifndef ___bpf_narg2
#define ___bpf_narg2(...) \
___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
#endif
#define ___bpf_treg_cnt(t) \
__builtin_choose_expr(sizeof(t) == 1, 1, \
__builtin_choose_expr(sizeof(t) == 2, 1, \
__builtin_choose_expr(sizeof(t) == 4, 1, \
__builtin_choose_expr(sizeof(t) == 8, 1, \
__builtin_choose_expr(sizeof(t) == 16, 2, \
(void)0)))))
#define ___bpf_reg_cnt0() (0)
#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)
#define ___bpf_union_arg(t, x, n) \
__builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
__builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
__builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
__builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
__builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
(void)0)))))
#define ___bpf_ctx_arg0(n, args...)
#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)
#define ___bpf_ctx_decl0()
#define ___bpf_ctx_decl1(t, x) , t x
#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)
/*
* BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
* arguments. Since each struct argument might take one or two u64 values
* in the trampoline stack, argument type size is needed to place proper number
* of u64 values for each argument. Therefore, BPF_PROG2 has different
* syntax from BPF_PROG. For example, for the following BPF_PROG syntax:
*
* int BPF_PROG(test2, int a, int b) { ... }
*
* the corresponding BPF_PROG2 syntax is:
*
* int BPF_PROG2(test2, int, a, int, b) { ... }
*
* where type and the corresponding argument name are separated by comma.
*
* Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
* than 8 bytes:
*
* int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
* int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
* {
* // access a, b, c, d, e, and ret directly
* ...
* }
*/
#define BPF_PROG2(name, args...) \
name(unsigned long long *ctx); \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
return ____##name(ctx ___bpf_ctx_arg(args)); \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
struct pt_regs;
#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx)
#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx)
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
/*
* BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
* low-level way of getting kprobe input arguments from struct pt_regs, and
* provides a familiar typed and named function arguments syntax and
 * semantics of accessing kprobe input parameters.
*
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
*/
#define BPF_KPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_kprobe_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
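/*
 * A minimal usage sketch (not part of libbpf; do_unlinkat's kernel
 * signature is an assumption):
 *
 *	SEC("kprobe/do_unlinkat")
 *	int BPF_KPROBE(probe_unlinkat, int dfd, struct filename *name)
 *	{
 *		bpf_printk("unlinkat called, dfd=%d", dfd);
 *		return 0;
 *	}
 */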
#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
/*
* BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
* return value (in addition to `struct pt_regs *ctx`), but no input
 * arguments, because they will be clobbered by the time the probed function
* returns.
*/
#define BPF_KRETPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_kretprobe_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
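/*
 * A minimal usage sketch (not part of libbpf; the probed function is an
 * assumption):
 *
 *	SEC("kretprobe/do_unlinkat")
 *	int BPF_KRETPROBE(probe_unlinkat_exit, long ret)
 *	{
 *		bpf_printk("unlinkat returned %ld", ret);
 *		return 0;
 *	}
 */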
/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
#define ___bpf_syscall_args0() ctx
#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs)
#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
#define ___bpf_syswrap_args0() ctx
#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs)
#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
/*
* BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
* tracing syscall functions, like __x64_sys_close. It hides the underlying
* platform-specific low-level way of getting syscall input arguments from
* struct pt_regs, and provides a familiar typed and named function arguments
* syntax and semantics of accessing syscall input parameters.
*
* Original struct pt_regs * context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
*
* At the moment BPF_KSYSCALL does not transparently handle all the calling
* convention quirks for the following syscalls:
*
* - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
* - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
* CONFIG_CLONE_BACKWARDS3.
* - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
* - compat syscalls.
*
* This may or may not change in the future. User needs to take extra measures
* to handle such quirks explicitly, if necessary.
*
* This macro relies on BPF CO-RE support and virtual __kconfig externs.
*/
#define BPF_KSYSCALL(name, args...) \
name(struct pt_regs *ctx); \
extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
? (struct pt_regs *)PT_REGS_PARM1(ctx) \
: ctx; \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
if (LINUX_HAS_SYSCALL_WRAPPER) \
return ____##name(___bpf_syswrap_args(args)); \
else \
return ____##name(___bpf_syscall_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
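/*
 * A minimal usage sketch (not part of libbpf; close(2) takes a single fd
 * argument):
 *
 *	SEC("ksyscall/close")
 *	int BPF_KSYSCALL(probe_close, int fd)
 *	{
 *		bpf_printk("close(%d)", fd);
 *		return 0;
 *	}
 */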
/* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE,
* but are named way less confusingly for SEC("uprobe") and SEC("uretprobe")
* use cases.
*/
#define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args)
#define BPF_URETPROBE(name, args...) BPF_KRETPROBE(name, ##args)
#endif

View File

@ -0,0 +1,99 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
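// This program downloads a pinned libbpf release and extracts the license
// and header files needed to build the XDP STUN server into the destination
// directory.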
package main
import (
"archive/tar"
"compress/gzip"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strings"
)
const (
libbpfVersion = "1.4.3"
prefix = "libbpf-" + libbpfVersion
)
var (
filesToExtract = map[string]struct{}{
prefix + "/LICENSE.BSD-2-Clause": {},
prefix + "/src/bpf_endian.h": {},
prefix + "/src/bpf_helper_defs.h": {},
prefix + "/src/bpf_helpers.h": {},
prefix + "/src/bpf_tracing.h": {},
}
)
var (
flagDest = flag.String("dest", ".", "destination directory")
)
// TODO(jwhited): go generate strategy for derp/xdp
func main() {
flag.Parse()
f, err := os.CreateTemp("", "libbpf")
if err != nil {
log.Panic(err)
}
defer os.Remove(f.Name())
resp, err := http.Get(fmt.Sprintf("https://github.com/libbpf/libbpf/archive/refs/tags/v%s.tar.gz", libbpfVersion))
if err != nil {
log.Panic(err)
}
defer resp.Body.Close()
_, err = io.Copy(f, resp.Body)
if err != nil {
log.Panic(err)
}
_, err = f.Seek(0, 0)
if err != nil {
log.Panic(err)
}
g, err := gzip.NewReader(f)
if err != nil {
log.Panic(err)
}
defer g.Close()
t := tar.NewReader(g)
seen := make(map[string]bool, len(filesToExtract))
for {
h, err := t.Next()
if err != nil {
log.Panic(err)
}
if strings.Contains(h.Name, "..") {
continue
}
_, ok := filesToExtract[h.Name]
if ok {
if seen[h.Name] {
log.Panicf("saw %s more than once in archive", h.Name)
}
seen[h.Name] = true
p := filepath.Join(*flagDest, filepath.Base(h.Name))
e, err := os.Create(p)
if err != nil {
log.Panic(err)
}
_, err = io.Copy(e, t)
if err != nil {
log.Panic(err)
}
err = e.Close()
if err != nil {
log.Panic(err)
}
if len(seen) == len(filesToExtract) {
break
}
}
}
}

585
derp/xdp/xdp.c Normal file
View File

@ -0,0 +1,585 @@
//go:build ignore
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <bpf_endian.h>
#include <bpf_helpers.h>
struct config {
// TODO(jwhited): if we add more fields consider endianness consistency in
// the context of the data. cilium/ebpf uses native endian encoding for map
// encoding even if we use big endian types here, e.g. __be16.
__u16 dst_port;
};
struct config *unused_config __attribute__((unused)); // required by bpf2go -type
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct config));
__uint(max_entries, 1);
} config_map SEC(".maps");
struct counters_key {
__u8 unused;
__u8 af;
__u8 pba;
__u8 prog_end;
};
struct counters_key *unused_counters_key __attribute__((unused)); // required by bpf2go -type
enum counter_key_af {
COUNTER_KEY_AF_UNKNOWN,
COUNTER_KEY_AF_IPV4,
COUNTER_KEY_AF_IPV6,
COUNTER_KEY_AF_LEN
};
enum counter_key_af *unused_counter_key_af __attribute__((unused)); // required by bpf2go -type
enum counter_key_packets_bytes_action {
COUNTER_KEY_PACKETS_PASS_TOTAL,
COUNTER_KEY_BYTES_PASS_TOTAL,
COUNTER_KEY_PACKETS_ABORTED_TOTAL,
COUNTER_KEY_BYTES_ABORTED_TOTAL,
COUNTER_KEY_PACKETS_TX_TOTAL,
COUNTER_KEY_BYTES_TX_TOTAL,
COUNTER_KEY_PACKETS_DROP_TOTAL,
COUNTER_KEY_BYTES_DROP_TOTAL,
COUNTER_KEY_PACKETS_BYTES_ACTION_LEN
};
enum counter_key_packets_bytes_action *unused_counter_key_packets_bytes_action __attribute__((unused)); // required by bpf2go -type
enum counter_key_prog_end {
COUNTER_KEY_END_UNSPECIFIED,
COUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR,
COUNTER_KEY_END_INVALID_UDP_CSUM,
COUNTER_KEY_END_INVALID_IP_CSUM,
COUNTER_KEY_END_NOT_STUN_PORT,
COUNTER_KEY_END_INVALID_SW_ATTR_VAL,
COUNTER_KEY_END_LEN
};
enum counter_key_prog_end *unused_counter_key_prog_end __attribute__((unused)); // required by bpf2go -type
#define COUNTERS_MAP_MAX_ENTRIES (((COUNTER_KEY_AF_LEN - 1) << 16) | \
((COUNTER_KEY_PACKETS_BYTES_ACTION_LEN - 1) << 8) | \
(COUNTER_KEY_END_LEN - 1))
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(key_size, sizeof(struct counters_key));
__uint(value_size, sizeof(__u64));
__uint(max_entries, COUNTERS_MAP_MAX_ENTRIES);
} counters_map SEC(".maps");
struct stunreq {
__be16 type;
__be16 length;
__be32 magic;
__be32 txid[3];
// attributes follow
};
struct stunattr {
__be16 num;
__be16 length;
};
struct stunxor {
__u8 unused;
__u8 family;
__be16 port;
__be32 addr;
};
struct stunxor6 {
__u8 unused;
__u8 family;
__be16 port;
__be32 addr[4];
};
#define STUN_BINDING_REQUEST 1
#define STUN_MAGIC 0x2112a442
#define STUN_ATTR_SW 0x8022
#define STUN_ATTR_XOR_MAPPED_ADDR 0x0020
#define STUN_BINDING_RESPONSE 0x0101
#define STUN_MAGIC_FOR_PORT_XOR 0x2112
#define MAX_UDP_LEN_IPV4 1480
#define MAX_UDP_LEN_IPV6 1460
#define IP_MF 0x2000
#define IP_OFFSET 0x1fff
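// csum_fold_flip folds a 32-bit checksum accumulator into 16 bits and
// returns its one's complement.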
static __always_inline __u16 csum_fold_flip(__u32 csum) {
__u32 sum;
sum = (csum >> 16) + (csum & 0xffff); // maximum value 0x1fffe
sum += (sum >> 16); // maximum value 0xffff
return ~sum;
}
// csum_const_size is an alternative to bpf_csum_diff. It's a verifier
// workaround for when we are forced to use a constant max_size + bounds
// checking. The alternative is passing a dynamic length to the bpf_csum_diff
// {from,to}_size arguments, which the verifier can't follow. For further info
// see: https://github.com/iovisor/bcc/issues/2463#issuecomment-512503958
static __always_inline __u16 csum_const_size(__u32 seed, void* from, void* data_end, int max_size) {
__u16 *buf = from;
for (int i = 0; i < max_size; i += 2) {
if ((void *)(buf + 1) > data_end) {
break;
}
seed += *buf;
buf++;
}
if ((void *)buf + 1 <= data_end) {
seed += *(__u8 *)buf;
}
return csum_fold_flip(seed);
}
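// pseudo_sum_ipv6 returns the unfolded sum of the IPv6 pseudo-header used
// to seed the UDP checksum.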
static __always_inline __u32 pseudo_sum_ipv6(struct ipv6hdr* ip6, __u16 udp_len) {
__u32 pseudo = 0; // TODO(jwhited): __u64 for intermediate checksum values to reduce number of ops
for (int i = 0; i < 8; i ++) {
pseudo += ip6->saddr.in6_u.u6_addr16[i];
pseudo += ip6->daddr.in6_u.u6_addr16[i];
}
pseudo += bpf_htons(ip6->nexthdr);
pseudo += udp_len;
return pseudo;
}
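// pseudo_sum_ipv4 returns the unfolded sum of the IPv4 pseudo-header used
// to seed the UDP checksum.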
static __always_inline __u32 pseudo_sum_ipv4(struct iphdr* ip, __u16 udp_len) {
__u32 pseudo = (__u16)ip->saddr;
pseudo += (__u16)(ip->saddr >> 16);
pseudo += (__u16)ip->daddr;
pseudo += (__u16)(ip->daddr >> 16);
pseudo += bpf_htons(ip->protocol);
pseudo += udp_len;
return pseudo;
}
struct packet_context {
enum counter_key_af af;
enum counter_key_prog_end prog_end;
};
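// inc_counter adds val to the counters_map entry for key, inserting the
// entry if it does not yet exist.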
static __always_inline int inc_counter(struct counters_key key, __u64 val) {
__u64 *counter = bpf_map_lookup_elem(&counters_map, &key);
if (!counter) {
return bpf_map_update_elem(&counters_map, &key, &val, BPF_ANY);
}
*counter += val;
return bpf_map_update_elem(&counters_map, &key, counter, BPF_ANY);
}
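// handle_counters increments the packets and bytes counters corresponding to
// the packet in ctx, its address family, the returned XDP action, and the
// reason the program ended.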
static __always_inline int handle_counters(struct xdp_md *ctx, int action, struct packet_context *pctx) {
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u64 bytes = data_end - data;
enum counter_key_packets_bytes_action packets_pba = COUNTER_KEY_PACKETS_PASS_TOTAL;
enum counter_key_packets_bytes_action bytes_pba = COUNTER_KEY_BYTES_PASS_TOTAL;
switch (action) {
case XDP_ABORTED:
packets_pba = COUNTER_KEY_PACKETS_ABORTED_TOTAL;
bytes_pba = COUNTER_KEY_BYTES_ABORTED_TOTAL;
break;
case XDP_PASS:
packets_pba = COUNTER_KEY_PACKETS_PASS_TOTAL;
bytes_pba = COUNTER_KEY_BYTES_PASS_TOTAL;
break;
case XDP_TX:
packets_pba = COUNTER_KEY_PACKETS_TX_TOTAL;
bytes_pba = COUNTER_KEY_BYTES_TX_TOTAL;
break;
case XDP_DROP:
packets_pba = COUNTER_KEY_PACKETS_DROP_TOTAL;
bytes_pba = COUNTER_KEY_BYTES_DROP_TOTAL;
break;
}
struct counters_key packets_key = {
.af = pctx->af,
.pba = packets_pba,
.prog_end = pctx->prog_end,
};
struct counters_key bytes_key = {
.af = pctx->af,
.pba = bytes_pba,
.prog_end = pctx->prog_end,
};
inc_counter(packets_key, 1);
inc_counter(bytes_key, bytes);
return 0;
}
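// is_ipv6 reads the address family from a local struct packet_context *pctx,
// which must be in scope wherever the macro is used.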
#define is_ipv6 (pctx->af == COUNTER_KEY_AF_IPV6)
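// handle_packet parses the packet in ctx. If it is a well-formed STUN
// binding request towards the configured port, it is transformed in place
// into a STUN binding response and XDP_TX is returned; all other packets are
// passed through to the kernel via XDP_PASS. Failures while transforming
// return XDP_ABORTED.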
static __always_inline int handle_packet(struct xdp_md *ctx, struct packet_context *pctx) {
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
pctx->af = COUNTER_KEY_AF_UNKNOWN;
pctx->prog_end = COUNTER_KEY_END_UNSPECIFIED;
struct ethhdr *eth = data;
if ((void *)(eth + 1) > data_end) {
return XDP_PASS;
}
struct iphdr *ip;
struct ipv6hdr *ip6;
struct udphdr *udp;
int validate_udp_csum;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
pctx->af = COUNTER_KEY_AF_IPV4;
ip = (void *)(eth + 1);
if ((void *)(ip + 1) > data_end) {
return XDP_PASS;
}
if (ip->ihl != 5 ||
ip->version != 4 ||
ip->protocol != IPPROTO_UDP ||
(ip->frag_off & bpf_htons(IP_MF | IP_OFFSET)) != 0) {
return XDP_PASS;
}
// validate ipv4 header checksum
__u32 cs_unfolded = bpf_csum_diff(0, 0, (void *)ip, sizeof(*ip), 0);
__u16 cs = csum_fold_flip(cs_unfolded);
if (cs != 0) {
pctx->prog_end = COUNTER_KEY_END_INVALID_IP_CSUM;
return XDP_PASS;
}
if (bpf_ntohs(ip->tot_len) != data_end - (void *)ip) {
return XDP_PASS;
}
udp = (void *)(ip + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_PASS;
}
if (udp->check != 0) {
// https://datatracker.ietf.org/doc/html/rfc768#page-3
// If the computed checksum is zero, it is transmitted as all
// ones (the equivalent in one's complement arithmetic). An all
// zero transmitted checksum value means that the transmitter
// generated no checksum (for debugging or for higher level
// protocols that don't care).
validate_udp_csum = 1;
}
} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
pctx->af = COUNTER_KEY_AF_IPV6;
ip6 = (void *)(eth + 1);
if ((void *)(ip6 + 1) > data_end) {
return XDP_PASS;
}
if (ip6->version != 6 || ip6->nexthdr != IPPROTO_UDP) {
return XDP_PASS;
}
udp = (void *)(ip6 + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_PASS;
}
if (bpf_ntohs(ip6->payload_len) != data_end - (void *)udp) {
return XDP_PASS;
}
// https://datatracker.ietf.org/doc/html/rfc8200#page-28
// Unlike IPv4, the default behavior when UDP packets are
// originated by an IPv6 node is that the UDP checksum is not
// optional. That is, whenever originating a UDP packet, an IPv6
// node must compute a UDP checksum over the packet and the
// pseudo-header, and, if that computation yields a result of
// zero, it must be changed to hex FFFF for placement in the UDP
// header. IPv6 receivers must discard UDP packets containing a
// zero checksum and should log the error.
validate_udp_csum = 1;
} else {
return XDP_PASS;
}
__u32 config_key = 0;
struct config *c = bpf_map_lookup_elem(&config_map, &config_key);
if (!c) {
return XDP_PASS;
}
if (bpf_ntohs(udp->len) != data_end - (void *)udp) {
return XDP_PASS;
}
if (bpf_ntohs(udp->dest) != c->dst_port) {
pctx->prog_end = COUNTER_KEY_END_NOT_STUN_PORT;
return XDP_PASS;
}
if (validate_udp_csum) {
__u16 cs;
__u32 pseudo_sum;
if (is_ipv6) {
pseudo_sum = pseudo_sum_ipv6(ip6, udp->len);
cs = csum_const_size(pseudo_sum, udp, data_end, MAX_UDP_LEN_IPV6);
} else {
pseudo_sum = pseudo_sum_ipv4(ip, udp->len);
cs = csum_const_size(pseudo_sum, udp, data_end, MAX_UDP_LEN_IPV4);
}
if (cs != 0) {
pctx->prog_end = COUNTER_KEY_END_INVALID_UDP_CSUM;
return XDP_PASS;
}
}
struct stunreq *req = (void *)(udp + 1);
if ((void *)(req + 1) > data_end) {
return XDP_PASS;
}
if (req->type != bpf_htons(STUN_BINDING_REQUEST)) {
return XDP_PASS;
}
if (bpf_ntohl(req->magic) != STUN_MAGIC) {
return XDP_PASS;
}
void *attrs = (void *)(req + 1);
__u16 attrs_len = ((char *)data_end) - ((char *)attrs);
if (bpf_ntohs(req->length) != attrs_len) {
return XDP_PASS;
}
struct stunattr *sa = attrs;
if ((void *)(sa + 1) > data_end) {
return XDP_PASS;
}
// Assume the order and contents of attributes. We *could* loop through
// them, but parsing their lengths and performing arithmetic against the
// packet pointer is more pain than it's worth. Bounds checks are invisible
// to the verifier in certain circumstances where things move from registers
// to the stack and/or compilation optimizations remove them entirely. There
// have only ever been two attributes included by the client, and we are
// only interested in one of them, anyway. Verify the software attribute,
// but ignore the fingerprint attribute as it's only useful where STUN is
// multiplexed with other traffic on the same port/socket, which is not the
// case here.
void *attr_data = (void *)(sa + 1);
if (bpf_ntohs(sa->length) != 8 || bpf_ntohs(sa->num) != STUN_ATTR_SW) {
pctx->prog_end = COUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR;
return XDP_PASS;
}
if (attr_data + 8 > data_end) {
return XDP_PASS;
}
char want_sw[] = {0x74, 0x61, 0x69, 0x6c, 0x6e, 0x6f, 0x64, 0x65}; // tailnode
char *got_sw = attr_data;
for (int j = 0; j < 8; j++) {
if (got_sw[j] != want_sw[j]) {
pctx->prog_end = COUNTER_KEY_END_INVALID_SW_ATTR_VAL;
return XDP_PASS;
}
}
// Begin transforming packet into a STUN_BINDING_RESPONSE. From here
// onwards we return XDP_ABORTED instead of XDP_PASS when transformations or
// bounds checks fail as it would be nonsensical to pass a mangled packet
// through to the kernel, and we may be interested in debugging via
// tracepoint.
// Set success response and new length. Magic cookie and txid remain the
// same.
req->type = bpf_htons(STUN_BINDING_RESPONSE);
if (is_ipv6) {
req->length = bpf_htons(sizeof(struct stunattr) + sizeof(struct stunxor6));
} else {
req->length = bpf_htons(sizeof(struct stunattr) + sizeof(struct stunxor));
}
// Set attr type. Length remains unchanged, but set it again as a safeguard
// against future changes.
sa->num = bpf_htons(STUN_ATTR_XOR_MAPPED_ADDR);
if (is_ipv6) {
sa->length = bpf_htons(sizeof(struct stunxor6));
} else {
sa->length = bpf_htons(sizeof(struct stunxor));
}
struct stunxor *xor;
struct stunxor6 *xor6;
// Adjust tail and reset header pointers.
int adjust_tail_by;
if (is_ipv6) {
xor6 = attr_data;
adjust_tail_by = (void *)(xor6 + 1) - data_end;
} else {
xor = attr_data;
adjust_tail_by = (void *)(xor + 1) - data_end;
}
if (bpf_xdp_adjust_tail(ctx, adjust_tail_by)) {
return XDP_ABORTED;
}
data_end = (void *)(long)ctx->data_end;
data = (void *)(long)ctx->data;
eth = data;
if ((void *)(eth + 1) > data_end) {
return XDP_ABORTED;
}
if (is_ipv6) {
ip6 = (void *)(eth + 1);
if ((void *)(ip6 + 1) > data_end) {
return XDP_ABORTED;
}
udp = (void *)(ip6 + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_ABORTED;
}
} else {
ip = (void *)(eth + 1);
if ((void *)(ip + 1) > data_end) {
return XDP_ABORTED;
}
udp = (void *)(ip + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_ABORTED;
}
}
req = (void *)(udp + 1);
if ((void *)(req + 1) > data_end) {
return XDP_ABORTED;
}
sa = (void *)(req + 1);
if ((void *)(sa + 1) > data_end) {
return XDP_ABORTED;
}
// Set attr data.
if (is_ipv6) {
xor6 = (void *)(sa + 1);
if ((void *)(xor6 + 1) > data_end) {
return XDP_ABORTED;
}
xor6->unused = 0x00; // unused byte
xor6->family = 0x02;
xor6->port = udp->source ^ bpf_htons(STUN_MAGIC_FOR_PORT_XOR);
xor6->addr[0] = ip6->saddr.in6_u.u6_addr32[0] ^ bpf_htonl(STUN_MAGIC);
for (int i = 1; i < 4; i++) {
// All three are __be32, no endianness flips.
xor6->addr[i] = ip6->saddr.in6_u.u6_addr32[i] ^ req->txid[i-1];
}
} else {
xor = (void *)(sa + 1);
if ((void *)(xor + 1) > data_end) {
return XDP_ABORTED;
}
xor->unused = 0x00; // unused byte
xor->family = 0x01;
xor->port = udp->source ^ bpf_htons(STUN_MAGIC_FOR_PORT_XOR);
xor->addr = ip->saddr ^ bpf_htonl(STUN_MAGIC);
}
// Flip ethernet header source and destination address.
__u8 eth_tmp[ETH_ALEN];
__builtin_memcpy(eth_tmp, eth->h_source, ETH_ALEN);
__builtin_memcpy(eth->h_source, eth->h_dest, ETH_ALEN);
__builtin_memcpy(eth->h_dest, eth_tmp, ETH_ALEN);
// Flip ip header source and destination address.
if (is_ipv6) {
struct in6_addr ip_tmp = ip6->saddr;
ip6->saddr = ip6->daddr;
ip6->daddr = ip_tmp;
} else {
__be32 ip_tmp = ip->saddr;
ip->saddr = ip->daddr;
ip->daddr = ip_tmp;
}
// Flip udp header source and destination ports.
__be16 port_tmp = udp->source;
udp->source = udp->dest;
udp->dest = port_tmp;
// Update total length, TTL, and checksum.
__u32 cs = 0;
if (is_ipv6) {
if ((void *)(ip6 + 1) > data_end) {
return XDP_ABORTED;
}
__u16 payload_len = data_end - (void *)(ip6 + 1);
ip6->payload_len = bpf_htons(payload_len);
ip6->hop_limit = IPDEFTTL;
} else {
__u16 tot_len = data_end - (void *)ip;
ip->tot_len = bpf_htons(tot_len);
ip->ttl = IPDEFTTL;
ip->check = 0;
cs = bpf_csum_diff(0, 0, (void *)ip, sizeof(*ip), cs);
ip->check = csum_fold_flip(cs);
}
// Avoid dynamic length math against the packet pointer, which is just a big
// verifier headache. Instead sizeof() all the things.
int to_csum_len = sizeof(*udp) + sizeof(*req) + sizeof(*sa);
// Update udp header length and checksum.
if (is_ipv6) {
to_csum_len += sizeof(*xor6);
udp = (void *)(ip6 + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_ABORTED;
}
__u16 udp_len = data_end - (void *)udp;
udp->len = bpf_htons(udp_len);
udp->check = 0;
cs = pseudo_sum_ipv6(ip6, udp->len);
} else {
to_csum_len += sizeof(*xor);
udp = (void *)(ip + 1);
if ((void *)(udp + 1) > data_end) {
return XDP_ABORTED;
}
__u16 udp_len = data_end - (void *)udp;
udp->len = bpf_htons(udp_len);
udp->check = 0;
cs = pseudo_sum_ipv4(ip, udp->len);
}
if ((void *)udp + to_csum_len > data_end) {
return XDP_ABORTED;
}
cs = bpf_csum_diff(0, 0, (void*)udp, to_csum_len, cs);
udp->check = csum_fold_flip(cs);
return XDP_TX;
}
#undef is_ipv6
SEC("xdp")
int xdp_prog_func(struct xdp_md *ctx) {
struct packet_context pctx = {
.af = COUNTER_KEY_AF_UNKNOWN,
.prog_end = COUNTER_KEY_END_UNSPECIFIED,
};
int action = XDP_PASS;
action = handle_packet(ctx, &pctx);
handle_counters(ctx, action, &pctx);
return action;
}

45
derp/xdp/xdp.go Normal file
View File

@ -0,0 +1,45 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package xdp
// XDPAttachFlags represents how an XDP program will be attached to an
// interface. This is a mirror of cilium/ebpf/link.AttachFlags, without
// pulling it in for non-Linux.
type XDPAttachFlags uint32
const (
// XDPDriverFallbackGenericMode attempts XDPDriverMode, and falls back to
// XDPGenericMode if the driver does not support XDP.
XDPDriverFallbackGenericMode = 0
)
const (
// XDPGenericMode (SKB) links the XDP BPF program in generic mode for
// drivers that do not yet support native XDP.
XDPGenericMode XDPAttachFlags = 1 << (iota + 1)
// XDPDriverMode links the XDP BPF program into the driver's receive path.
XDPDriverMode
// XDPOffloadMode offloads the entire XDP BPF program into hardware.
XDPOffloadMode
)
// STUNServerConfig represents the configuration of a STUNServer.
type STUNServerConfig struct {
DeviceName string
DstPort int
AttachFlags XDPAttachFlags
// FullVerifierErr determines whether XDP verifier errors are returned in
// their entirety. This is a multiline error that can be very long. Full
// verifier errors are primarily useful during development, but should be
// mostly unexpected in a production environment.
FullVerifierErr bool
}
type STUNServerOption interface {
apply(*stunServerOptions)
}
type stunServerOptions struct {
//lint:ignore U1000 used in xdp_linux_test.go
noAttach bool
}

28
derp/xdp/xdp_default.go Normal file
View File

@ -0,0 +1,28 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package xdp
import (
"errors"
"github.com/prometheus/client_golang/prometheus"
)
// STUNServer is unimplemented on these platforms, see xdp_linux.go.
type STUNServer struct {
}
func NewSTUNServer(config *STUNServerConfig, opts ...STUNServerOption) (*STUNServer, error) {
return nil, errors.New("unimplemented on this GOOS")
}
func (s *STUNServer) Close() error {
return errors.New("unimplemented on this GOOS")
}
func (s *STUNServer) Describe(descCh chan<- *prometheus.Desc) {}
func (s *STUNServer) Collect(metricCh chan<- prometheus.Metric) {}

282
derp/xdp/xdp_linux.go Normal file
View File

@ -0,0 +1,282 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package xdp
import (
"errors"
"fmt"
"log"
"math"
"net"
"sync"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/link"
"github.com/prometheus/client_golang/prometheus"
)
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type config -type counters_key -type counter_key_af -type counter_key_packets_bytes_action -type counter_key_prog_end bpf xdp.c -- -I headers
// STUNServer manages loading and unloading of an eBPF XDP program that serves
// the STUN protocol. It exports statistics for the XDP program via its
// implementation of the prometheus.Collector interface.
type STUNServer struct {
mu sync.Mutex
objs *bpfObjects
metrics *stunServerMetrics
}
//lint:ignore U1000 used in xdp_linux_test.go, which has a build tag
type noAttachOption struct{}
//lint:ignore U1000 used in xdp_linux_test.go, which has a build tag
func (n noAttachOption) apply(opts *stunServerOptions) {
opts.noAttach = true
}
func (s *STUNServerConfig) validate() error {
if len(s.DeviceName) < 1 {
return errors.New("DeviceName is unspecified")
}
if s.DstPort < 0 || s.DstPort > math.MaxUint16 {
return errors.New("DstPort is outside of uint16 bounds")
}
return nil
}
// NewSTUNServer returns an instance of a STUNServer that has attached the STUN
// XDP program to the netdev and destination port specified by config.
func NewSTUNServer(config *STUNServerConfig, opts ...STUNServerOption) (*STUNServer, error) {
o := &stunServerOptions{}
for _, opt := range opts {
opt.apply(o)
}
err := config.validate()
if err != nil {
return nil, fmt.Errorf("invalid config: %v", err)
}
objs := new(bpfObjects)
err = loadBpfObjects(objs, nil)
if err != nil {
var ve *ebpf.VerifierError
if config.FullVerifierErr && errors.As(err, &ve) {
err = fmt.Errorf("verifier error: %+v", ve)
}
return nil, fmt.Errorf("error loading XDP program: %w", err)
}
server := &STUNServer{
objs: objs,
metrics: newSTUNServerMetrics(),
}
var key uint32
xdpConfig := bpfConfig{
DstPort: uint16(config.DstPort),
}
err = objs.ConfigMap.Put(key, &xdpConfig)
if err != nil {
return nil, fmt.Errorf("error loading config in eBPF map: %w", err)
}
if o.noAttach {
return server, nil
}
iface, err := net.InterfaceByName(config.DeviceName)
if err != nil {
return nil, fmt.Errorf("error finding device: %w", err)
}
_, err = link.AttachXDP(link.XDPOptions{
Program: objs.XdpProgFunc,
Interface: iface.Index,
Flags: link.XDPAttachFlags(config.AttachFlags),
})
if err != nil {
return nil, fmt.Errorf("error attaching XDP program to dev: %w", err)
}
return server, nil
}
// Close unloads the XDP program and associated maps.
func (s *STUNServer) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
return s.objs.Close()
}
type stunServerMetrics struct {
last map[bpfCountersKey]uint64
registry *prometheus.Registry
packets *prometheus.CounterVec
bytes *prometheus.CounterVec
}
func newSTUNServerMetrics() *stunServerMetrics {
last := make(map[bpfCountersKey]uint64)
registry := prometheus.NewRegistry()
packets := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "xdp",
Subsystem: "stun_server",
Name: "packets_total",
}, []string{addressFamilyKey, xdpOutcomeKey, progEndKey})
bytes := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "xdp",
Subsystem: "stun_server",
Name: "bytes_total",
}, []string{addressFamilyKey, xdpOutcomeKey, progEndKey})
registry.MustRegister(packets, bytes)
return &stunServerMetrics{
last: last,
registry: registry,
packets: packets,
bytes: bytes,
}
}
const (
xdpOutcomeKey = "xdp_outcome"
progEndKey = "prog_end"
)
const (
xdpOutcomePass = "pass"
xdpOutcomeAborted = "aborted"
xdpOutcomeDrop = "drop"
xdpOutcomeTX = "tx"
)
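// sum returns the total of vals, which hold per-CPU values for a single
// counters map key.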
func sum(vals []uint64) uint64 {
var s uint64
for _, v := range vals {
s += v
}
return s
}
const (
addressFamilyKey = "address_family"
)
const (
addressFamilyUnknown = "unknown"
addressFamilyIPv4 = "ipv4"
addressFamilyIPv6 = "ipv6"
)
var (
// TODO(jwhited): go generate these maps or equivalent switch logic behind bpf2go
pbaToOutcomeLV = map[bpfCounterKeyPacketsBytesAction]string{
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL: xdpOutcomePass,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL: xdpOutcomePass,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_ABORTED_TOTAL: xdpOutcomeAborted,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_ABORTED_TOTAL: xdpOutcomeAborted,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL: xdpOutcomeTX,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL: xdpOutcomeTX,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_DROP_TOTAL: xdpOutcomeDrop,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_DROP_TOTAL: xdpOutcomeDrop,
}
progEndLV = map[bpfCounterKeyProgEnd]string{
bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED: "unspecified",
bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR: "unexpected_first_stun_attr",
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM: "invalid_udp_csum",
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_IP_CSUM: "invalid_ip_csum",
bpfCounterKeyProgEndCOUNTER_KEY_END_NOT_STUN_PORT: "not_stun_port",
bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL: "invalid_sw_attr_val",
}
packetCounterKeys = map[bpfCounterKeyPacketsBytesAction]bool{
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_ABORTED_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_DROP_TOTAL: true,
}
//lint:ignore U1000 used in xdp_linux_test.go, which has a build tag
bytesCounterKeys = map[bpfCounterKeyPacketsBytesAction]bool{
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_ABORTED_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL: true,
bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_DROP_TOTAL: true,
}
)
// increase returns the difference between "from" and "to" assuming they
// originated from the same counter gathered at different times, i.e. "from"
// was incremented by a non-negative value into "to". In the case of wraps
// increase returns the difference between "to" and zero.
func increase(from, to uint64) uint64 {
if to >= from {
return to - from
}
return to
}
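// updateFromMapKV validates key, sums the per-CPU vals, and adds the
// increase relative to the previously-seen value to the matching prometheus
// counter.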
func (s *stunServerMetrics) updateFromMapKV(key bpfCountersKey, vals []uint64) error {
if key.Unused != 0 ||
key.Af >= uint8(bpfCounterKeyAfCOUNTER_KEY_AF_LEN) ||
key.Pba >= uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN) ||
key.ProgEnd >= uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_LEN) {
return fmt.Errorf("unexpected counter key: %+v", key)
}
previousAllCPUs := s.last[key]
allCPUs := sum(vals)
s.last[key] = allCPUs
inc := increase(previousAllCPUs, allCPUs)
if inc == 0 {
return nil
}
var af string
switch key.Af {
case uint8(bpfCounterKeyAfCOUNTER_KEY_AF_UNKNOWN):
af = addressFamilyUnknown
case uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4):
af = addressFamilyIPv4
case uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6):
af = addressFamilyIPv6
}
labels := prometheus.Labels{
addressFamilyKey: af,
xdpOutcomeKey: pbaToOutcomeLV[bpfCounterKeyPacketsBytesAction(key.Pba)],
progEndKey: progEndLV[bpfCounterKeyProgEnd(key.ProgEnd)],
}
var metric *prometheus.CounterVec
if packetCounterKeys[bpfCounterKeyPacketsBytesAction(key.Pba)] {
metric = s.packets
} else {
metric = s.bytes
}
metric.With(labels).Add(float64(inc))
return nil
}
// Describe is part of the implementation of prometheus.Collector.
func (s *STUNServer) Describe(descCh chan<- *prometheus.Desc) {
s.metrics.registry.Describe(descCh)
}
// Collect is part of the implementation of prometheus.Collector.
func (s *STUNServer) Collect(metricCh chan<- prometheus.Metric) {
err := s.updateMetrics()
if err != nil {
log.Printf("xdp: error collecting metrics: %v", err)
}
s.metrics.registry.Collect(metricCh)
}
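// updateMetrics iterates the eBPF counters map and folds its per-CPU values
// into the prometheus counters.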
func (s *STUNServer) updateMetrics() error {
s.mu.Lock()
defer s.mu.Unlock()
iter := s.objs.CountersMap.Iterate()
var key bpfCountersKey
numCPU, err := ebpf.PossibleCPU()
if err != nil {
return err
}
vals := make([]uint64, numCPU)
for iter.Next(&key, &vals) {
err := s.metrics.updateFromMapKV(key, vals)
if err != nil {
return err
}
}
return iter.Err()
}

975
derp/xdp/xdp_linux_test.go Normal file
View File

@ -0,0 +1,975 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package xdp
import (
"bytes"
"errors"
"fmt"
"net/netip"
"testing"
"github.com/cilium/ebpf"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/checksum"
"gvisor.dev/gvisor/pkg/tcpip/header"
"tailscale.com/net/stun"
)
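// xdpAction mirrors the kernel's XDP return codes for readable test output.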
type xdpAction uint32
func (x xdpAction) String() string {
switch x {
case xdpActionAborted:
return "XDP_ABORTED"
case xdpActionDrop:
return "XDP_DROP"
case xdpActionPass:
return "XDP_PASS"
case xdpActionTX:
return "XDP_TX"
case xdpActionRedirect:
return "XDP_REDIRECT"
default:
return fmt.Sprintf("unknown(%d)", x)
}
}
const (
xdpActionAborted xdpAction = iota
xdpActionDrop
xdpActionPass
xdpActionTX
xdpActionRedirect
)
const (
ethHLen = 14
udpHLen = 8
ipv4HLen = 20
ipv6HLen = 40
)
const (
defaultSTUNPort = 3478
defaultTTL = 64
reqSrcPort = uint16(1025)
)
var (
reqEthSrc = tcpip.LinkAddress([]byte{0x00, 0x00, 0x5e, 0x00, 0x53, 0x01})
reqEthDst = tcpip.LinkAddress([]byte{0x00, 0x00, 0x5e, 0x00, 0x53, 0x02})
reqIPv4Src = netip.MustParseAddr("192.0.2.1")
reqIPv4Dst = netip.MustParseAddr("192.0.2.2")
reqIPv6Src = netip.MustParseAddr("2001:db8::1")
reqIPv6Dst = netip.MustParseAddr("2001:db8::2")
)
var testTXID = stun.TxID([12]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b})
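// ipv4Mutations holds optional mutators applied to a generated IPv4 STUN
// binding request, enabling negative test cases.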
type ipv4Mutations struct {
ipHeaderFn func(header.IPv4)
udpHeaderFn func(header.UDP)
stunReqFn func([]byte)
}
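// getIPv4STUNBindingReq returns an Ethernet frame containing an IPv4 STUN
// binding request, with any provided mutations applied.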
func getIPv4STUNBindingReq(mutations *ipv4Mutations) []byte {
req := stun.Request(testTXID)
if mutations != nil && mutations.stunReqFn != nil {
mutations.stunReqFn(req)
}
payloadLen := len(req)
totalLen := ipv4HLen + udpHLen + payloadLen
b := make([]byte, ethHLen+totalLen)
ipv4H := header.IPv4(b[ethHLen:])
ethH := header.Ethernet(b)
ethFields := header.EthernetFields{
SrcAddr: reqEthSrc,
DstAddr: reqEthDst,
Type: unix.ETH_P_IP,
}
ethH.Encode(&ethFields)
ipFields := header.IPv4Fields{
SrcAddr: tcpip.AddrFrom4(reqIPv4Src.As4()),
DstAddr: tcpip.AddrFrom4(reqIPv4Dst.As4()),
Protocol: unix.IPPROTO_UDP,
TTL: defaultTTL,
TotalLength: uint16(totalLen),
}
ipv4H.Encode(&ipFields)
ipv4H.SetChecksum(^ipv4H.CalculateChecksum())
if mutations != nil && mutations.ipHeaderFn != nil {
mutations.ipHeaderFn(ipv4H)
}
udpH := header.UDP(b[ethHLen+ipv4HLen:])
udpFields := header.UDPFields{
SrcPort: reqSrcPort,
DstPort: defaultSTUNPort,
Length: uint16(udpHLen + payloadLen),
Checksum: 0,
}
udpH.Encode(&udpFields)
copy(b[ethHLen+ipv4HLen+udpHLen:], req)
cs := header.PseudoHeaderChecksum(
unix.IPPROTO_UDP,
ipv4H.SourceAddress(),
ipv4H.DestinationAddress(),
uint16(udpHLen+payloadLen),
)
cs = checksum.Checksum(req, cs)
udpH.SetChecksum(^udpH.CalculateChecksum(cs))
if mutations != nil && mutations.udpHeaderFn != nil {
mutations.udpHeaderFn(udpH)
}
return b
}
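// ipv6Mutations holds optional mutators applied to a generated IPv6 STUN
// binding request, enabling negative test cases.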
type ipv6Mutations struct {
ipHeaderFn func(header.IPv6)
udpHeaderFn func(header.UDP)
stunReqFn func([]byte)
}
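// getIPv6STUNBindingReq returns an Ethernet frame containing an IPv6 STUN
// binding request, with any provided mutations applied.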
func getIPv6STUNBindingReq(mutations *ipv6Mutations) []byte {
req := stun.Request(testTXID)
if mutations != nil && mutations.stunReqFn != nil {
mutations.stunReqFn(req)
}
payloadLen := len(req)
src := netip.MustParseAddr("2001:db8::1")
dst := netip.MustParseAddr("2001:db8::2")
b := make([]byte, ethHLen+ipv6HLen+udpHLen+payloadLen)
ipv6H := header.IPv6(b[ethHLen:])
ethH := header.Ethernet(b)
ethFields := header.EthernetFields{
SrcAddr: reqEthSrc,
DstAddr: reqEthDst,
Type: unix.ETH_P_IPV6,
}
ethH.Encode(&ethFields)
ipFields := header.IPv6Fields{
SrcAddr: tcpip.AddrFrom16(src.As16()),
DstAddr: tcpip.AddrFrom16(dst.As16()),
TransportProtocol: unix.IPPROTO_UDP,
HopLimit: defaultTTL,
PayloadLength: uint16(udpHLen + payloadLen),
}
ipv6H.Encode(&ipFields)
if mutations != nil && mutations.ipHeaderFn != nil {
mutations.ipHeaderFn(ipv6H)
}
udpH := header.UDP(b[ethHLen+ipv6HLen:])
udpFields := header.UDPFields{
SrcPort: reqSrcPort,
DstPort: defaultSTUNPort,
Length: uint16(udpHLen + payloadLen),
Checksum: 0,
}
udpH.Encode(&udpFields)
copy(b[ethHLen+ipv6HLen+udpHLen:], req)
cs := header.PseudoHeaderChecksum(
unix.IPPROTO_UDP,
ipv6H.SourceAddress(),
ipv6H.DestinationAddress(),
uint16(udpHLen+payloadLen),
)
cs = checksum.Checksum(req, cs)
udpH.SetChecksum(^udpH.CalculateChecksum(cs))
if mutations != nil && mutations.udpHeaderFn != nil {
mutations.udpHeaderFn(udpH)
}
return b
}
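// getIPv4STUNBindingResp returns the Ethernet frame the XDP program is
// expected to produce in response to getIPv4STUNBindingReq(nil).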
func getIPv4STUNBindingResp() []byte {
addrPort := netip.AddrPortFrom(reqIPv4Src, reqSrcPort)
resp := stun.Response(testTXID, addrPort)
payloadLen := len(resp)
totalLen := ipv4HLen + udpHLen + payloadLen
b := make([]byte, ethHLen+totalLen)
ipv4H := header.IPv4(b[ethHLen:])
ethH := header.Ethernet(b)
ethFields := header.EthernetFields{
SrcAddr: reqEthDst,
DstAddr: reqEthSrc,
Type: unix.ETH_P_IP,
}
ethH.Encode(&ethFields)
ipFields := header.IPv4Fields{
SrcAddr: tcpip.AddrFrom4(reqIPv4Dst.As4()),
DstAddr: tcpip.AddrFrom4(reqIPv4Src.As4()),
Protocol: unix.IPPROTO_UDP,
TTL: defaultTTL,
TotalLength: uint16(totalLen),
}
ipv4H.Encode(&ipFields)
ipv4H.SetChecksum(^ipv4H.CalculateChecksum())
udpH := header.UDP(b[ethHLen+ipv4HLen:])
udpFields := header.UDPFields{
SrcPort: defaultSTUNPort,
DstPort: reqSrcPort,
Length: uint16(udpHLen + payloadLen),
Checksum: 0,
}
udpH.Encode(&udpFields)
copy(b[ethHLen+ipv4HLen+udpHLen:], resp)
cs := header.PseudoHeaderChecksum(
unix.IPPROTO_UDP,
ipv4H.SourceAddress(),
ipv4H.DestinationAddress(),
uint16(udpHLen+payloadLen),
)
cs = checksum.Checksum(resp, cs)
udpH.SetChecksum(^udpH.CalculateChecksum(cs))
return b
}
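// getIPv6STUNBindingResp returns the Ethernet frame the XDP program is
// expected to produce in response to getIPv6STUNBindingReq(nil).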
func getIPv6STUNBindingResp() []byte {
addrPort := netip.AddrPortFrom(reqIPv6Src, reqSrcPort)
resp := stun.Response(testTXID, addrPort)
payloadLen := len(resp)
totalLen := ipv6HLen + udpHLen + payloadLen
b := make([]byte, ethHLen+totalLen)
ipv6H := header.IPv6(b[ethHLen:])
ethH := header.Ethernet(b)
ethFields := header.EthernetFields{
SrcAddr: reqEthDst,
DstAddr: reqEthSrc,
Type: unix.ETH_P_IPV6,
}
ethH.Encode(&ethFields)
ipFields := header.IPv6Fields{
SrcAddr: tcpip.AddrFrom16(reqIPv6Dst.As16()),
DstAddr: tcpip.AddrFrom16(reqIPv6Src.As16()),
TransportProtocol: unix.IPPROTO_UDP,
HopLimit: defaultTTL,
PayloadLength: uint16(udpHLen + payloadLen),
}
ipv6H.Encode(&ipFields)
udpH := header.UDP(b[ethHLen+ipv6HLen:])
udpFields := header.UDPFields{
SrcPort: defaultSTUNPort,
DstPort: reqSrcPort,
Length: uint16(udpHLen + payloadLen),
Checksum: 0,
}
udpH.Encode(&udpFields)
copy(b[ethHLen+ipv6HLen+udpHLen:], resp)
cs := header.PseudoHeaderChecksum(
unix.IPPROTO_UDP,
ipv6H.SourceAddress(),
ipv6H.DestinationAddress(),
uint16(udpHLen+payloadLen),
)
cs = checksum.Checksum(resp, cs)
udpH.SetChecksum(^udpH.CalculateChecksum(cs))
return b
}
func TestXDP(t *testing.T) {
ipv4STUNBindingReqTX := getIPv4STUNBindingReq(nil)
ipv6STUNBindingReqTX := getIPv6STUNBindingReq(nil)
ipv4STUNBindingReqIPCsumPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
oldCS := ipv4H.Checksum()
newCS := oldCS
for newCS == 0 || newCS == oldCS {
newCS++
}
ipv4H.SetChecksum(newCS)
},
})
ipv4STUNBindingReqIHLPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H[0] &= 0xF0
},
})
ipv4STUNBindingReqIPVerPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H[0] &= 0x0F
},
})
ipv4STUNBindingReqIPProtoPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H[9] = unix.IPPROTO_TCP
},
})
ipv4STUNBindingReqFragOffsetPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H.SetFlagsFragmentOffset(ipv4H.Flags(), 8)
},
})
ipv4STUNBindingReqFlagsMFPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H.SetFlagsFragmentOffset(header.IPv4FlagMoreFragments, 0)
},
})
ipv4STUNBindingReqTotLenPass := getIPv4STUNBindingReq(&ipv4Mutations{
ipHeaderFn: func(ipv4H header.IPv4) {
ipv4H.SetTotalLength(ipv4H.TotalLength() + 1)
ipv4H.SetChecksum(0)
ipv4H.SetChecksum(^ipv4H.CalculateChecksum())
},
})
ipv6STUNBindingReqIPVerPass := getIPv6STUNBindingReq(&ipv6Mutations{
ipHeaderFn: func(ipv6H header.IPv6) {
ipv6H[0] &= 0x0F
},
udpHeaderFn: func(udp header.UDP) {},
})
ipv6STUNBindingReqNextHdrPass := getIPv6STUNBindingReq(&ipv6Mutations{
ipHeaderFn: func(ipv6H header.IPv6) {
ipv6H.SetNextHeader(unix.IPPROTO_TCP)
},
udpHeaderFn: func(udp header.UDP) {},
})
ipv6STUNBindingReqPayloadLenPass := getIPv6STUNBindingReq(&ipv6Mutations{
ipHeaderFn: func(ipv6H header.IPv6) {
ipv6H.SetPayloadLength(ipv6H.PayloadLength() + 1)
},
udpHeaderFn: func(udp header.UDP) {},
})
ipv4STUNBindingReqUDPCsumPass := getIPv4STUNBindingReq(&ipv4Mutations{
udpHeaderFn: func(udpH header.UDP) {
oldCS := udpH.Checksum()
newCS := oldCS
for newCS == 0 || newCS == oldCS {
newCS++
}
udpH.SetChecksum(newCS)
},
})
ipv6STUNBindingReqUDPCsumPass := getIPv6STUNBindingReq(&ipv6Mutations{
udpHeaderFn: func(udpH header.UDP) {
oldCS := udpH.Checksum()
newCS := oldCS
for newCS == 0 || newCS == oldCS {
newCS++
}
udpH.SetChecksum(newCS)
},
})
ipv4STUNBindingReqSTUNTypePass := getIPv4STUNBindingReq(&ipv4Mutations{
stunReqFn: func(req []byte) {
req[1] = ^req[1]
},
})
ipv6STUNBindingReqSTUNTypePass := getIPv6STUNBindingReq(&ipv6Mutations{
stunReqFn: func(req []byte) {
req[1] = ^req[1]
},
})
ipv4STUNBindingReqSTUNMagicPass := getIPv4STUNBindingReq(&ipv4Mutations{
stunReqFn: func(req []byte) {
req[4] = ^req[4]
},
})
ipv6STUNBindingReqSTUNMagicPass := getIPv6STUNBindingReq(&ipv6Mutations{
stunReqFn: func(req []byte) {
req[4] = ^req[4]
},
})
ipv4STUNBindingReqSTUNAttrsLenPass := getIPv4STUNBindingReq(&ipv4Mutations{
stunReqFn: func(req []byte) {
req[2] = ^req[2]
},
})
ipv6STUNBindingReqSTUNAttrsLenPass := getIPv6STUNBindingReq(&ipv6Mutations{
stunReqFn: func(req []byte) {
req[2] = ^req[2]
},
})
ipv4STUNBindingReqSTUNSWValPass := getIPv4STUNBindingReq(&ipv4Mutations{
stunReqFn: func(req []byte) {
req[24] = ^req[24]
},
})
ipv6STUNBindingReqSTUNSWValPass := getIPv6STUNBindingReq(&ipv6Mutations{
stunReqFn: func(req []byte) {
req[24] = ^req[24]
},
})
ipv4STUNBindingReqSTUNFirstAttrPass := getIPv4STUNBindingReq(&ipv4Mutations{
stunReqFn: func(req []byte) {
req[21] = ^req[21]
},
})
ipv6STUNBindingReqSTUNFirstAttrPass := getIPv6STUNBindingReq(&ipv6Mutations{
stunReqFn: func(req []byte) {
req[21] = ^req[21]
},
})
cases := []struct {
name string
packetIn []byte
wantCode xdpAction
wantPacketOut []byte
wantMetrics map[bpfCountersKey]uint64
}{
{
name: "ipv4 STUN Binding Request TX",
packetIn: ipv4STUNBindingReqTX,
wantCode: xdpActionTX,
wantPacketOut: getIPv4STUNBindingResp(),
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(getIPv4STUNBindingResp())),
},
},
{
name: "ipv6 STUN Binding Request TX",
packetIn: ipv6STUNBindingReqTX,
wantCode: xdpActionTX,
wantPacketOut: getIPv6STUNBindingResp(),
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_TX_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_TX_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(getIPv6STUNBindingResp())),
},
},
{
name: "ipv4 STUN Binding Request invalid ip csum PASS",
packetIn: ipv4STUNBindingReqIPCsumPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqIPCsumPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_IP_CSUM),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_IP_CSUM),
}: uint64(len(ipv4STUNBindingReqIPCsumPass)),
},
},
{
name: "ipv4 STUN Binding Request ihl PASS",
packetIn: ipv4STUNBindingReqIHLPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqIHLPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqIHLPass)),
},
},
{
name: "ipv4 STUN Binding Request ip version PASS",
packetIn: ipv4STUNBindingReqIPVerPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqIPVerPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqIPVerPass)),
},
},
{
name: "ipv4 STUN Binding Request ip proto PASS",
packetIn: ipv4STUNBindingReqIPProtoPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqIPProtoPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqIPProtoPass)),
},
},
{
name: "ipv4 STUN Binding Request frag offset PASS",
packetIn: ipv4STUNBindingReqFragOffsetPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqFragOffsetPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqFragOffsetPass)),
},
},
{
name: "ipv4 STUN Binding Request flags mf PASS",
packetIn: ipv4STUNBindingReqFlagsMFPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqFlagsMFPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqFlagsMFPass)),
},
},
{
name: "ipv4 STUN Binding Request tot len PASS",
packetIn: ipv4STUNBindingReqTotLenPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqTotLenPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqTotLenPass)),
},
},
{
name: "ipv6 STUN Binding Request ip version PASS",
packetIn: ipv6STUNBindingReqIPVerPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqIPVerPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqIPVerPass)),
},
},
{
name: "ipv6 STUN Binding Request next hdr PASS",
packetIn: ipv6STUNBindingReqNextHdrPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqNextHdrPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqNextHdrPass)),
},
},
{
name: "ipv6 STUN Binding Request payload len PASS",
packetIn: ipv6STUNBindingReqPayloadLenPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqPayloadLenPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqPayloadLenPass)),
},
},
{
name: "ipv4 STUN Binding Request UDP csum PASS",
packetIn: ipv4STUNBindingReqUDPCsumPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqUDPCsumPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM),
}: uint64(len(ipv4STUNBindingReqUDPCsumPass)),
},
},
{
name: "ipv6 STUN Binding Request UDP csum PASS",
packetIn: ipv6STUNBindingReqUDPCsumPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqUDPCsumPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_UDP_CSUM),
}: uint64(len(ipv6STUNBindingReqUDPCsumPass)),
},
},
{
name: "ipv4 STUN Binding Request STUN type PASS",
packetIn: ipv4STUNBindingReqSTUNTypePass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqSTUNTypePass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqSTUNTypePass)),
},
},
{
name: "ipv6 STUN Binding Request STUN type PASS",
packetIn: ipv6STUNBindingReqSTUNTypePass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqSTUNTypePass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqSTUNTypePass)),
},
},
{
name: "ipv4 STUN Binding Request STUN magic PASS",
packetIn: ipv4STUNBindingReqSTUNMagicPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqSTUNMagicPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqSTUNMagicPass)),
},
},
{
name: "ipv6 STUN Binding Request STUN magic PASS",
packetIn: ipv6STUNBindingReqSTUNMagicPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqSTUNMagicPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqSTUNMagicPass)),
},
},
{
name: "ipv4 STUN Binding Request STUN attrs len PASS",
packetIn: ipv4STUNBindingReqSTUNAttrsLenPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqSTUNAttrsLenPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv4STUNBindingReqSTUNAttrsLenPass)),
},
},
{
name: "ipv6 STUN Binding Request STUN attrs len PASS",
packetIn: ipv6STUNBindingReqSTUNAttrsLenPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqSTUNAttrsLenPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNSPECIFIED),
}: uint64(len(ipv6STUNBindingReqSTUNAttrsLenPass)),
},
},
{
name: "ipv4 STUN Binding Request STUN SW val PASS",
packetIn: ipv4STUNBindingReqSTUNSWValPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqSTUNSWValPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL),
}: uint64(len(ipv4STUNBindingReqSTUNSWValPass)),
},
},
{
name: "ipv6 STUN Binding Request STUN SW val PASS",
packetIn: ipv6STUNBindingReqSTUNSWValPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqSTUNSWValPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_INVALID_SW_ATTR_VAL),
}: uint64(len(ipv6STUNBindingReqSTUNSWValPass)),
},
},
{
name: "ipv4 STUN Binding Request STUN first attr PASS",
packetIn: ipv4STUNBindingReqSTUNFirstAttrPass,
wantCode: xdpActionPass,
wantPacketOut: ipv4STUNBindingReqSTUNFirstAttrPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV4),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR),
}: uint64(len(ipv4STUNBindingReqSTUNFirstAttrPass)),
},
},
{
name: "ipv6 STUN Binding Request STUN first attr PASS",
packetIn: ipv6STUNBindingReqSTUNFirstAttrPass,
wantCode: xdpActionPass,
wantPacketOut: ipv6STUNBindingReqSTUNFirstAttrPass,
wantMetrics: map[bpfCountersKey]uint64{
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR),
}: 1,
{
Af: uint8(bpfCounterKeyAfCOUNTER_KEY_AF_IPV6),
Pba: uint8(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_BYTES_PASS_TOTAL),
ProgEnd: uint8(bpfCounterKeyProgEndCOUNTER_KEY_END_UNEXPECTED_FIRST_STUN_ATTR),
}: uint64(len(ipv6STUNBindingReqSTUNFirstAttrPass)),
},
},
}
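// Construct the server against a fake device name and without attaching
// XDP (noAttachOption); the program is driven directly via
// BPF_PROG_TEST_RUN below. Loading it still requires privileges, hence
// the EPERM skip.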
server, err := NewSTUNServer(&STUNServerConfig{DeviceName: "fake", DstPort: defaultSTUNPort},
&noAttachOption{})
if err != nil {
if errors.Is(err, unix.EPERM) {
// TODO(jwhited): get this running
t.Skip("skipping due to EPERM error; test requires elevated privileges")
}
t.Fatalf("error constructing STUN server: %v", err)
}
defer server.Close()
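// clearCounters deletes every key in the BPF counters map and resets the
// server's cached metrics, so each test case observes only its own counts.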
clearCounters := func() error {
server.metrics.last = make(map[bpfCountersKey]uint64)
var cur, next bpfCountersKey
keys := make([]bpfCountersKey, 0)
for err = server.objs.CountersMap.NextKey(nil, &next); ; err = server.objs.CountersMap.NextKey(cur, &next) {
if err != nil {
if errors.Is(err, ebpf.ErrKeyNotExist) {
break
}
return err
}
keys = append(keys, next)
cur = next
}
for _, key := range keys {
err = server.objs.CountersMap.Delete(&key)
if err != nil {
return err
}
}
err = server.objs.CountersMap.NextKey(nil, &next)
if !errors.Is(err, ebpf.ErrKeyNotExist) {
return errors.New("counters map is not empty")
}
return nil
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
err = clearCounters()
if err != nil {
t.Fatalf("error clearing counters: %v", err)
}
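// Size DataOut for a maximum standard Ethernet frame: 1500-byte MTU plus
// 14-byte Ethernet header.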
opts := ebpf.RunOptions{
Data: c.packetIn,
DataOut: make([]byte, 1514),
}
got, err := server.objs.XdpProgFunc.Run(&opts)
if err != nil {
t.Fatalf("error running program: %v", err)
}
if xdpAction(got) != c.wantCode {
t.Fatalf("got code: %s != %s", xdpAction(got), c.wantCode)
}
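// cilium/ebpf reslices DataOut to the length the program actually wrote,
// so this equality check also catches output length mismatches.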
if !bytes.Equal(opts.DataOut, c.wantPacketOut) {
t.Fatal("packets not equal")
}
err = server.updateMetrics()
if err != nil {
t.Fatalf("error updating metrics: %v", err)
}
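// Compare counters in both directions: every expected key must hold its
// expected value, and no unexpected counter may have incremented.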
if c.wantMetrics != nil {
for k, v := range c.wantMetrics {
gotCounter, ok := server.metrics.last[k]
if !ok {
t.Errorf("expected counter at key %+v not found", k)
}
if gotCounter != v {
t.Errorf("key: %+v gotCounter: %d != %d", k, gotCounter, v)
}
}
for k := range server.metrics.last {
_, ok := c.wantMetrics[k]
if !ok {
t.Errorf("counter at key: %+v incremented unexpectedly", k)
}
}
}
})
}
}

func TestCountersMapKey(t *testing.T) {
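// The counters map key packs each enum into a uint8 field (see the uint8
// conversions above), so the *_LEN sentinels must fit within uint8 and the
// Go-side label slices must stay in sync with the enums in xdp.c.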
if bpfCounterKeyAfCOUNTER_KEY_AF_LEN > 256 {
t.Error("COUNTER_KEY_AF_LEN no longer fits within uint8")
}
if bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN > 256 {
t.Error("COUNTER_KEY_PACKETS_BYTES_ACTION no longer fits within uint8")
}
if bpfCounterKeyProgEndCOUNTER_KEY_END_LEN > 256 {
t.Error("COUNTER_KEY_END_LEN no longer fits within uint8")
}
if len(pbaToOutcomeLV) != int(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN) {
t.Error("pbaToOutcomeLV is not in sync with xdp.c")
}
if len(progEndLV) != int(bpfCounterKeyProgEndCOUNTER_KEY_END_LEN) {
t.Error("progEndLV is not in sync with xdp.c")
}
if len(packetCounterKeys)+len(bytesCounterKeys) != int(bpfCounterKeyPacketsBytesActionCOUNTER_KEY_PACKETS_BYTES_ACTION_LEN) {
t.Error("packetCounterKeys and/or bytesCounterKeys is not in sync with xdp.c")
}
}

5
go.mod
View File

@ -16,6 +16,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7
github.com/bramvdbogaerde/go-scp v1.4.0
github.com/cilium/ebpf v0.15.0
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/creack/pty v1.1.21
@ -65,8 +66,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6
github.com/prometheus-community/pro-bing v0.4.0
github.com/prometheus/client_golang v1.18.0
github.com/prometheus/common v0.46.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/common v0.48.0
github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff
github.com/safchain/ethtool v0.3.0
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e

14
go.sum
View File

@ -206,8 +206,8 @@ github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXa
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk=
github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
@ -342,6 +342,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@ -758,8 +760,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -770,8 +772,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=

View File

@ -283,3 +283,19 @@ func TestResponse(t *testing.T) {
}
}
}

func TestAttrOrderForXdpDERP(t *testing.T) {
// package derp/xdp assumes attribute order. This test ensures we don't
// drift and break that assumption.
txID := stun.NewTxID()
req := stun.Request(txID)
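// A Binding Request is a 20-byte STUN header followed here by a 12-byte
// SOFTWARE attribute (4-byte type/length header + 8-byte value).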
if len(req) < 20+12 {
t.Fatal("too short")
}
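// Bytes 20-21 hold the first attribute's type; 0x8022 is SOFTWARE (RFC 5389).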
if !bytes.Equal(req[20:22], []byte{0x80, 0x22}) {
t.Fatal("the first attr is not of type software")
}
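// The attribute value begins at byte 24, after the 4-byte attribute header.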
if !bytes.Equal(req[24:32], []byte("tailnode")) {
t.Fatal("unexpected software attr value")
}
}

View File

@ -56,6 +56,9 @@ for file in $(find $1 \( -name '*.go' -or -name '*.tsx' -or -name '*.ts' -not -n
# Generated kube deepcopy funcs file starts with a Go build tag + an empty line
header="$(head -5 $file | tail -n+3 )"
;;
$1/derp/xdp/bpf_bpfe*.go)
# Generated eBPF management code
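# (bpf2go output begins with its own "Code generated" line rather than
# the standard copyright header)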
;;
*)
header="$(head -2 $file)"
;;

View File

@ -28,7 +28,7 @@ func Handler(w http.ResponseWriter, r *http.Request) {
// gatherNativePrometheusMetrics writes metrics from the default
// metric registry in text format.
func gatherNativePrometheusMetrics(w http.ResponseWriter) error {
enc := expfmt.NewEncoder(w, expfmt.FmtText)
enc := expfmt.NewEncoder(w, expfmt.NewFormat(expfmt.TypeTextPlain))
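// expfmt.FmtText is deprecated upstream in prometheus/common;
// NewFormat(expfmt.TypeTextPlain) selects the same text/plain exposition format.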
mfs, err := prometheus.DefaultGatherer.Gather()
if err != nil {
return fmt.Errorf("could not gather metrics from DefaultGatherer: %w", err)