Mirror of https://github.com/tailscale/tailscale.git (synced 2025-07-30 07:43:42 +00:00)

Merge branch 'main' into dylan/derp-hosting-provider

Commit: 058a44558d

.github/workflows/codeql-analysis.yml (vendored, 6 lines changed)

@@ -55,7 +55,7 @@ jobs:

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
-     uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19
+     uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -66,7 +66,7 @@ jobs:
    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
-     uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19
+     uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl
@@ -80,4 +80,4 @@ jobs:
    # make release

    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19
+     uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0

.github/workflows/test.yml (vendored, 260 lines changed)

@@ -15,6 +15,10 @@ env:
   # - false: we expect fuzzing to be happy, and should report failure if it's not.
   # - true: we expect fuzzing is broken, and should report failure if it starts working.
   TS_FUZZ_CURRENTLY_BROKEN: false
+  # GOMODCACHE is the same definition on all OSes. Within the workspace, we use
+  # toplevel directories "src" (for the checked out source code), and "gomodcache"
+  # and other caches as siblings to follow.
+  GOMODCACHE: ${{ github.workspace }}/gomodcache

 on:
   push:
@@ -38,8 +42,42 @@ concurrency:
   cancel-in-progress: true

 jobs:
+  gomod-cache:
+    runs-on: ubuntu-24.04
+    outputs:
+      cache-key: ${{ steps.hash.outputs.key }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Compute cache key from go.{mod,sum}
+        id: hash
+        run: echo "key=gomod-cross3-${{ hashFiles('src/go.mod', 'src/go.sum') }}" >> $GITHUB_OUTPUT
+      # See if the cache entry already exists to avoid downloading it
+      # and doing the cache write again.
+      - id: check-cache
+        uses: actions/cache/restore@v4
+        with:
+          path: gomodcache # relative to workspace; see env note at top of file
+          key: ${{ steps.hash.outputs.key }}
+          lookup-only: true
+          enableCrossOsArchive: true
+      - name: Download modules
+        if: steps.check-cache.outputs.cache-hit != 'true'
+        working-directory: src
+        run: go mod download
+      - name: Cache Go modules
+        if: steps.check-cache.outputs.cache-hit != 'true'
+        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache # relative to workspace; see env note at top of file
+          key: ${{ steps.hash.outputs.key }}
+          enableCrossOsArchive: true
+
   race-root-integration:
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     strategy:
       fail-fast: false # don't abort the entire matrix if one element fails
       matrix:
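The gomod-cache job above is the heart of this change: it computes one content-addressed key from go.mod and go.sum, downloads the module graph once (skipping even that when the entry already exists, via the lookup-only restore), and publishes the key as a job output. Every other job then declares needs: gomod-cache and restores the same entry instead of re-downloading modules. A rough Go sketch of the keying idea, illustrative only (GitHub's hashFiles() has its own exact SHA-256 scheme; the point is that the key changes precisely when the module graph does):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Hash go.mod and go.sum together; any module-graph change yields a new
	// key, so stale caches are never restored and unchanged graphs always hit.
	h := sha256.New()
	for _, name := range []string{"go.mod", "go.sum"} {
		f, err := os.Open(name)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(h, f); err != nil {
			log.Fatal(err)
		}
		f.Close()
	}
	// Same shape as the workflow's key: stable prefix + content hash.
	fmt.Printf("gomod-cross3-%x\n", h.Sum(nil))
}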
@@ -51,9 +89,19 @@ jobs:
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build test wrapper
+        working-directory: src
         run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
       - name: integration tests as root
+        working-directory: src
         run: PATH=$PWD/tool:$PATH /tmp/testwrapper -exec "sudo -E" -race ./tstest/integration/
         env:
           TS_TEST_SHARD: ${{ matrix.shard }}
@@ -75,9 +123,18 @@ jobs:
           shard: '3/3'
         - goarch: "386" # thanks yaml
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: Restore Cache
         uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
@@ -87,7 +144,6 @@ jobs:
           # fetched and extracted by tar
           path: |
             ~/.cache/go-build
-            ~/go/pkg/mod/cache
             ~\AppData\Local\go-build
           # The -2- here should be incremented when the scheme of data to be
           # cached changes (e.g. path above changes).
@@ -97,11 +153,13 @@ jobs:
             ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-
       - name: build all
         if: matrix.buildflags == '' # skip on race builder
+        working-directory: src
         run: ./tool/go build ${{matrix.buildflags}} ./...
         env:
           GOARCH: ${{ matrix.goarch }}
       - name: build variant CLIs
         if: matrix.buildflags == '' # skip on race builder
+        working-directory: src
         run: |
           export TS_USE_TOOLCHAIN=1
           ./build_dist.sh --extra-small ./cmd/tailscaled
@@ -116,19 +174,24 @@ jobs:
           sudo apt-get -y update
           sudo apt-get -y install qemu-user
       - name: build test wrapper
+        working-directory: src
         run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
       - name: test all
+        working-directory: src
         run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}}
         env:
           GOARCH: ${{ matrix.goarch }}
           TS_TEST_SHARD: ${{ matrix.shard }}
       - name: bench all
+        working-directory: src
         run: ./tool/go test ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$ $(for x in $(git grep -l "^func Benchmark" | xargs dirname | sort | uniq); do echo "./$x"; done)
         env:
           GOARCH: ${{ matrix.goarch }}
       - name: check that no tracked files changed
+        working-directory: src
         run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
       - name: check that no new files were added
+        working-directory: src
         run: |
           # Note: The "error: pathspec..." you see below is normal!
           # In the success case in which there are no new untracked files,
@@ -140,22 +203,33 @@ jobs:
             exit 1
           fi
       - name: Tidy cache
+        working-directory: src
         shell: bash
         run: |
           find $(go env GOCACHE) -type f -mmin +90 -delete
-          find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete

   windows:
     runs-on: windows-2022
+    needs: gomod-cache
     steps:
      - name: checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+       with:
+         path: src
+
      - name: Install Go
        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
        with:
-         go-version-file: go.mod
+         go-version-file: src/go.mod
          cache: false

+     - name: Restore Go module cache
+       uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+       with:
+         path: gomodcache
+         key: ${{ needs.gomod-cache.outputs.cache-key }}
+         enableCrossOsArchive: true
+
      - name: Restore Cache
        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
        with:
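Worth noting across these hunks: the module cache is written once on a Linux runner, and enableCrossOsArchive: true on each save/restore step is what allows that same archive to be restored on the Windows job; without it, actions/cache versions archives per-OS, so a Linux-written entry would never match on Windows.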
@@ -165,7 +239,6 @@ jobs:
          # fetched and extracted by tar
          path: |
            ~/.cache/go-build
-           ~/go/pkg/mod/cache
            ~\AppData\Local\go-build
          # The -2- here should be incremented when the scheme of data to be
          # cached changes (e.g. path above changes).
@@ -174,19 +247,22 @@ jobs:
            ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
            ${{ github.job }}-${{ runner.os }}-go-2-
      - name: test
+       working-directory: src
        run: go run ./cmd/testwrapper ./...
      - name: bench all
+       working-directory: src
        # Don't use -bench=. -benchtime=1x.
        # Somewhere in the layers (powershell?)
        # the equals signs cause great confusion.
        run: go test ./... -bench . -benchtime 1x -run "^$"
      - name: Tidy cache
+       working-directory: src
        shell: bash
        run: |
          find $(go env GOCACHE) -type f -mmin +90 -delete
-         find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete

   privileged:
+    needs: gomod-cache
     runs-on: ubuntu-24.04
     container:
       image: golang:latest
@@ -194,36 +270,47 @@ jobs:
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: chown
+        working-directory: src
         run: chown -R $(id -u):$(id -g) $PWD
       - name: privileged tests
+        working-directory: src
         run: ./tool/go test ./util/linuxfw ./derp/xdp

   vm:
+    needs: gomod-cache
     runs-on: ["self-hosted", "linux", "vm"]
     # VM tests run with some privileges, don't let them run on 3p PRs.
     if: github.repository == 'tailscale/tailscale'
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: Run VM tests
+        working-directory: src
        run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
        env:
          HOME: "/var/lib/ghrunner/home"
          TMPDIR: "/tmp"
          XDG_CACHE_HOME: "/var/lib/ghrunner/cache"

-  race-build:
-    runs-on: ubuntu-24.04
-    steps:
-      - name: checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: build all
-        run: ./tool/go install -race ./cmd/...
-      - name: build tests
-        run: ./tool/go test -race -exec=true ./...
-
   cross: # cross-compile checks, build only.
+    needs: gomod-cache
     strategy:
       fail-fast: false # don't abort the entire matrix if one element fails
       matrix:
@@ -262,6 +349,8 @@ jobs:
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
       - name: Restore Cache
         uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
|
|||||||
# fetched and extracted by tar
|
# fetched and extracted by tar
|
||||||
path: |
|
path: |
|
||||||
~/.cache/go-build
|
~/.cache/go-build
|
||||||
~/go/pkg/mod/cache
|
|
||||||
~\AppData\Local\go-build
|
~\AppData\Local\go-build
|
||||||
# The -2- here should be incremented when the scheme of data to be
|
# The -2- here should be incremented when the scheme of data to be
|
||||||
# cached changes (e.g. path above changes).
|
# cached changes (e.g. path above changes).
|
||||||
@@ -279,7 +367,14 @@ jobs:
          restore-keys: |
            ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
            ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build all
+        working-directory: src
         run: ./tool/go build ./cmd/...
         env:
           GOOS: ${{ matrix.goos }}
@@ -287,30 +382,42 @@ jobs:
           GOARM: ${{ matrix.goarm }}
           CGO_ENABLED: "0"
       - name: build tests
+        working-directory: src
         run: ./tool/go test -exec=true ./...
         env:
           GOOS: ${{ matrix.goos }}
           GOARCH: ${{ matrix.goarch }}
           CGO_ENABLED: "0"
       - name: Tidy cache
+        working-directory: src
         shell: bash
         run: |
           find $(go env GOCACHE) -type f -mmin +90 -delete
-          find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete

   ios: # similar to cross above, but iOS can't build most of the repo. So, just
-    #make it build a few smoke packages.
+    # make it build a few smoke packages.
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build some
+        working-directory: src
         run: ./tool/go build ./ipn/... ./ssh/tailssh ./wgengine/ ./types/... ./control/controlclient
         env:
           GOOS: ios
           GOARCH: arm64

   crossmin: # cross-compile for platforms where we only check cmd/tailscale{,d}
+    needs: gomod-cache
     strategy:
       fail-fast: false # don't abort the entire matrix if one element fails
       matrix:
@@ -332,6 +439,8 @@ jobs:
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
       - name: Restore Cache
         uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
@@ -341,7 +450,6 @@ jobs:
          # fetched and extracted by tar
          path: |
            ~/.cache/go-build
-           ~/go/pkg/mod/cache
            ~\AppData\Local\go-build
          # The -2- here should be incremented when the scheme of data to be
          # cached changes (e.g. path above changes).
@@ -349,7 +457,14 @@ jobs:
          restore-keys: |
            ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
            ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build core
+        working-directory: src
         run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled
         env:
           GOOS: ${{ matrix.goos }}
@@ -357,24 +472,34 @@ jobs:
           GOARM: ${{ matrix.goarm }}
           CGO_ENABLED: "0"
       - name: Tidy cache
+        working-directory: src
         shell: bash
         run: |
           find $(go env GOCACHE) -type f -mmin +90 -delete
-          find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete

   android:
     # similar to cross above, but android fails to build a few pieces of the
     # repo. We should fix those pieces, they're small, but as a stepping stone,
     # only test the subset of android that our past smoke test checked.
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
       # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
       # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
       # some Android breakages early.
       # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build some
+        working-directory: src
         run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/netmon ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version ./ssh/tailssh
         env:
           GOOS: android
@@ -382,9 +507,12 @@ jobs:

   wasm: # builds tsconnect, which is the only wasm build we support
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
       - name: Restore Cache
         uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
@@ -394,7 +522,6 @@ jobs:
          # fetched and extracted by tar
          path: |
            ~/.cache/go-build
-           ~/go/pkg/mod/cache
            ~\AppData\Local\go-build
          # The -2- here should be incremented when the scheme of data to be
          # cached changes (e.g. path above changes).
@@ -402,28 +529,45 @@ jobs:
          restore-keys: |
            ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
            ${{ github.job }}-${{ runner.os }}-go-2-
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: build tsconnect client
+        working-directory: src
         run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
         env:
           GOOS: js
           GOARCH: wasm
       - name: build tsconnect server
+        working-directory: src
         # Note, no GOOS/GOARCH in env on this build step, we're running a build
         # tool that handles the build itself.
         run: |
           ./tool/go run ./cmd/tsconnect --fast-compression build
           ./tool/go run ./cmd/tsconnect --fast-compression build-pkg
       - name: Tidy cache
+        working-directory: src
         shell: bash
         run: |
           find $(go env GOCACHE) -type f -mmin +90 -delete
-          find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete

   tailscale_go: # Subset of tests that depend on our custom Go toolchain.
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - name: Set GOMODCACHE env
+        run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: test tailscale_go
         run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...
@@ -477,7 +621,7 @@ jobs:
       uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
       with:
         oss-fuzz-project-name: 'tailscale'
-        fuzz-seconds: 300
+        fuzz-seconds: 150
         dry-run: false
         language: go
     - name: Set artifacts_path in env (workaround for actions/upload-artifact#176)
@@ -493,19 +637,40 @@ jobs:

   depaware:
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Set GOMODCACHE env
+        run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: check depaware
-        run: |
-          make depaware
+        working-directory: src
+        run: make depaware

   go_generate:
     runs-on: ubuntu-24.04
+    needs: gomod-cache
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          path: src
+      - name: Restore Go module cache
+        uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+        with:
+          path: gomodcache
+          key: ${{ needs.gomod-cache.outputs.cache-key }}
+          enableCrossOsArchive: true
       - name: check that 'go generate' is clean
+        working-directory: src
         run: |
           pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp')
           ./tool/go generate $pkgs
|
|||||||
|
|
||||||
go_mod_tidy:
|
go_mod_tidy:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
needs: gomod-cache
|
||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
path: src
|
||||||
|
- name: Restore Go module cache
|
||||||
|
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: gomodcache
|
||||||
|
key: ${{ needs.gomod-cache.outputs.cache-key }}
|
||||||
|
enableCrossOsArchive: true
|
||||||
- name: check that 'go mod tidy' is clean
|
- name: check that 'go mod tidy' is clean
|
||||||
|
working-directory: src
|
||||||
run: |
|
run: |
|
||||||
./tool/go mod tidy
|
./tool/go mod tidy
|
||||||
echo
|
echo
|
||||||
@ -527,14 +702,27 @@ jobs:
|
|||||||
|
|
||||||
licenses:
|
licenses:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
needs: gomod-cache
|
||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
path: src
|
||||||
|
- name: Restore Go module cache
|
||||||
|
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: gomodcache
|
||||||
|
key: ${{ needs.gomod-cache.outputs.cache-key }}
|
||||||
|
enableCrossOsArchive: true
|
||||||
- name: check licenses
|
- name: check licenses
|
||||||
run: ./scripts/check_license_headers.sh .
|
working-directory: src
|
||||||
|
run: |
|
||||||
|
grep -q TestLicenseHeaders *.go || (echo "Expected a test named TestLicenseHeaders"; exit 1)
|
||||||
|
./tool/go test -v -run=TestLicenseHeaders
|
||||||
|
|
||||||
staticcheck:
|
staticcheck:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
needs: gomod-cache
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false # don't abort the entire matrix if one element fails
|
fail-fast: false # don't abort the entire matrix if one element fails
|
||||||
matrix:
|
matrix:
|
||||||
@ -546,16 +734,22 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
- name: install staticcheck
|
with:
|
||||||
run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck
|
path: src
|
||||||
|
- name: Restore Go module cache
|
||||||
|
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: gomodcache
|
||||||
|
key: ${{ needs.gomod-cache.outputs.cache-key }}
|
||||||
|
enableCrossOsArchive: true
|
||||||
- name: run staticcheck
|
- name: run staticcheck
|
||||||
|
working-directory: src
|
||||||
run: |
|
run: |
|
||||||
export GOROOT=$(./tool/go env GOROOT)
|
export GOROOT=$(./tool/go env GOROOT)
|
||||||
export PATH=$GOROOT/bin:$PATH
|
./tool/go run -exec \
|
||||||
staticcheck -- $(./tool/go list ./... | grep -v tempfork)
|
"env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }}" \
|
||||||
env:
|
honnef.co/go/tools/cmd/staticcheck -- \
|
||||||
GOOS: ${{ matrix.goos }}
|
$(env GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} ./tool/go list ./... | grep -v tempfork)
|
||||||
GOARCH: ${{ matrix.goarch }}
|
|
||||||
|
|
||||||
notify_slack:
|
notify_slack:
|
||||||
if: always()
|
if: always()
|
||||||
|
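One detail in the staticcheck hunk: the old workflow installed the staticcheck binary in one step and exported GOOS/GOARCH onto the step that ran it. The new single step uses go run -exec, which builds staticcheck for the host and then invokes the built binary through the env GOOS=... GOARCH=... wrapper, so the cross-compile target applies to the analysis (and to the matching go list invocation) without a separate install step.
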
@@ -1,5 +1,6 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause
+
 package chirp

 import (

@@ -1,12 +1,11 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause

-//go:build go1.22
-
 // Package local contains a Go client for the Tailscale LocalAPI.
 package local

 import (
+    "bufio"
     "bytes"
     "cmp"
     "context"
@@ -16,6 +15,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "iter"
     "net"
     "net/http"
     "net/http/httptrace"
@@ -42,6 +42,7 @@ import (
     "tailscale.com/types/dnstype"
     "tailscale.com/types/key"
     "tailscale.com/types/tkatype"
+    "tailscale.com/util/eventbus"
     "tailscale.com/util/syspolicy/setting"
 )

@@ -414,24 +415,42 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) {
     return res.Body, nil
 }

-// StreamBusEvents returns a stream of the Tailscale bus events as they arrive.
-// Close the context to stop the stream.
-// Expected response from the server is newline-delimited JSON.
-// The caller must close the reader when it is finished reading.
-func (lc *Client) StreamBusEvents(ctx context.Context) (io.ReadCloser, error) {
-    req, err := http.NewRequestWithContext(ctx, "GET",
-        "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-bus-events", nil)
-    if err != nil {
-        return nil, err
-    }
-    res, err := lc.doLocalRequestNiceError(req)
-    if err != nil {
-        return nil, err
-    }
-    if res.StatusCode != http.StatusOK {
-        return nil, errors.New(res.Status)
-    }
-    return res.Body, nil
-}
+// StreamBusEvents returns an iterator of Tailscale bus events as they arrive.
+// Each pair is a valid event and a nil error, or a zero event and a non-nil error.
+// In case of error, the iterator ends after the pair reporting the error.
+// Iteration stops if ctx ends.
+func (lc *Client) StreamBusEvents(ctx context.Context) iter.Seq2[eventbus.DebugEvent, error] {
+    return func(yield func(eventbus.DebugEvent, error) bool) {
+        req, err := http.NewRequestWithContext(ctx, "GET",
+            "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-bus-events", nil)
+        if err != nil {
+            yield(eventbus.DebugEvent{}, err)
+            return
+        }
+        res, err := lc.doLocalRequestNiceError(req)
+        if err != nil {
+            yield(eventbus.DebugEvent{}, err)
+            return
+        }
+        if res.StatusCode != http.StatusOK {
+            yield(eventbus.DebugEvent{}, errors.New(res.Status))
+            return
+        }
+        defer res.Body.Close()
+        dec := json.NewDecoder(bufio.NewReader(res.Body))
+        for {
+            var evt eventbus.DebugEvent
+            if err := dec.Decode(&evt); err == io.EOF {
+                return
+            } else if err != nil {
+                yield(eventbus.DebugEvent{}, err)
+                return
+            }
+            if !yield(evt, nil) {
+                return
+            }
+        }
+    }
+}

 // Pprof returns a pprof profile of the Tailscale daemon.
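For orientation, a minimal sketch of a caller of the new iterator form (Go 1.23 range-over-func). It assumes the zero-value local.Client targets the local tailscaled, as the predecessor client/tailscale.LocalClient did; eventbus.DebugEvent's fields aren't shown in this diff, so the sketch just prints the value:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"tailscale.com/client/local"
)

func main() {
	// Assumption: the zero value is a usable client for the local tailscaled.
	var lc local.Client

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Each pair is either a valid event with a nil error, or a zero event
	// with the terminating error; the loop exits when ctx ends.
	for evt, err := range lc.StreamBusEvents(ctx) {
		if err != nil {
			log.Printf("event stream ended: %v", err)
			return
		}
		fmt.Printf("bus event: %+v\n", evt)
	}
}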
@@ -1,5 +1,6 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause
+
 package main

 import (

@@ -157,7 +157,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depaware)
     💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting
   L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
     tailscale.com/util/dnsname from tailscale.com/hostinfo+
-    tailscale.com/util/eventbus from tailscale.com/net/netmon
+    tailscale.com/util/eventbus from tailscale.com/net/netmon+
     💣 tailscale.com/util/hashx from tailscale.com/util/deephash
     tailscale.com/util/httpm from tailscale.com/client/tailscale
     tailscale.com/util/lineiter from tailscale.com/hostinfo+
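In depaware output a trailing + on the importer column means "and others": tailscale.com/util/eventbus previously reached cmd/derper only via net/netmon, and after this merge it appears to have additional importers (consistent with the new eventbus import in the LocalAPI client above), hence netmon becomes netmon+.
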
@@ -1,5 +1,6 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause
+
 package main

 import (
@@ -252,7 +252,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string,
         return false, fmt.Errorf("error determining DNS name base: %w", err)
     }
     dnsName := hostname + "." + tcd
-    if err := r.ensureCertResources(ctx, pgName, dnsName, ing); err != nil {
+    if err := r.ensureCertResources(ctx, pg, dnsName, ing); err != nil {
         return false, fmt.Errorf("error ensuring cert resources: %w", err)
     }

@@ -931,18 +931,31 @@ func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool {
 // (domain) is a valid Kubernetes resource name.
 // https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99
 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
-func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string, ing *networkingv1.Ingress) error {
-    secret := certSecret(pgName, r.tsNamespace, domain, ing)
-    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, nil); err != nil {
+func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string, ing *networkingv1.Ingress) error {
+    secret := certSecret(pg.Name, r.tsNamespace, domain, ing)
+    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) {
+        // Labels might have changed if the Ingress has been updated to use a
+        // different ProxyGroup.
+        s.Labels = secret.Labels
+    }); err != nil {
         return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err)
     }
-    role := certSecretRole(pgName, r.tsNamespace, domain)
-    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, nil); err != nil {
+    role := certSecretRole(pg.Name, r.tsNamespace, domain)
+    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
+        // Labels might have changed if the Ingress has been updated to use a
+        // different ProxyGroup.
+        r.Labels = role.Labels
+    }); err != nil {
         return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err)
     }
-    rb := certSecretRoleBinding(pgName, r.tsNamespace, domain)
-    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rb, nil); err != nil {
-        return fmt.Errorf("failed to create or update RoleBinding %s: %w", rb.Name, err)
+    rolebinding := certSecretRoleBinding(pg.Name, r.tsNamespace, domain)
+    if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) {
+        // Labels and subjects might have changed if the Ingress has been updated to use a
+        // different ProxyGroup.
+        rb.Labels = rolebinding.Labels
+        rb.Subjects = rolebinding.Subjects
+    }); err != nil {
+        return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err)
     }
     return nil
 }
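The substance of this fix: createOrUpdate was previously called with a nil mutation function, so a Secret/Role/RoleBinding created for one ProxyGroup never had its labels (or RoleBinding subjects) corrected when the Ingress was repointed at a different ProxyGroup. Below is a hedged sketch of the same create-or-update-with-mutation pattern using the upstream controller-runtime helper; the operator uses its own createOrUpdate helper, and the Rules reapplication here is illustrative, not the operator's actual code:

package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureRole creates the Role if absent and otherwise reapplies the fields
// that can legitimately change between reconciles, such as labels after an
// Ingress is repointed at a different ProxyGroup. A no-op mutation (the old
// behavior) would create the object once and never update it.
func ensureRole(ctx context.Context, c client.Client, desired *rbacv1.Role) error {
	existing := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:      desired.Name,
			Namespace: desired.Namespace,
		},
	}
	_, err := controllerutil.CreateOrUpdate(ctx, c, existing, func() error {
		existing.Labels = desired.Labels
		existing.Rules = desired.Rules
		return nil
	})
	return err
}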
@@ -69,7 +69,7 @@ func TestIngressPGReconciler(t *testing.T) {
     expectReconciled(t, ingPGR, "default", "test-ingress")
     verifyServeConfig(t, fc, "svc:my-svc", false)
     verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})
-    verifyTailscaledConfig(t, fc, []string{"svc:my-svc"})
+    verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"})

     // Verify that Role and RoleBinding have been created for the first Ingress.
     // Do not verify the cert Secret as that was already verified implicitly above.
@@ -132,7 +132,7 @@ func TestIngressPGReconciler(t *testing.T) {
     verifyServeConfig(t, fc, "svc:my-other-svc", false)
     verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"})

-    // Verify that Role and RoleBinding have been created for the first Ingress.
+    // Verify that Role and RoleBinding have been created for the second Ingress.
     // Do not verify the cert Secret as that was already verified implicitly above.
     expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net"))
     expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net"))
@@ -141,7 +141,7 @@ func TestIngressPGReconciler(t *testing.T) {
     verifyServeConfig(t, fc, "svc:my-svc", false)
     verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})

-    verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"})
+    verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"})

     // Delete second Ingress
     if err := fc.Delete(context.Background(), ing2); err != nil {
@@ -172,11 +172,20 @@ func TestIngressPGReconciler(t *testing.T) {
         t.Error("second Ingress service config was not cleaned up")
     }

-    verifyTailscaledConfig(t, fc, []string{"svc:my-svc"})
+    verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"})
     expectMissing[corev1.Secret](t, fc, "operator-ns", "my-other-svc.ts.net")
     expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-other-svc.ts.net")
     expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-other-svc.ts.net")

+    // Test Ingress ProxyGroup change
+    createPGResources(t, fc, "test-pg-second")
+    mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
+        ing.Annotations["tailscale.com/proxy-group"] = "test-pg-second"
+    })
+    expectReconciled(t, ingPGR, "default", "test-ingress")
+    expectEqual(t, fc, certSecretRole("test-pg-second", "operator-ns", "my-svc.ts.net"))
+    expectEqual(t, fc, certSecretRoleBinding("test-pg-second", "operator-ns", "my-svc.ts.net"))
+
     // Delete the first Ingress and verify cleanup
     if err := fc.Delete(context.Background(), ing); err != nil {
         t.Fatalf("deleting Ingress: %v", err)
@@ -187,7 +196,7 @@ func TestIngressPGReconciler(t *testing.T) {
     // Verify the ConfigMap was cleaned up
     cm = &corev1.ConfigMap{}
     if err := fc.Get(context.Background(), types.NamespacedName{
-        Name:      "test-pg-ingress-config",
+        Name:      "test-pg-second-ingress-config",
         Namespace: "operator-ns",
     }, cm); err != nil {
         t.Fatalf("getting ConfigMap: %v", err)
@@ -201,7 +210,7 @@ func TestIngressPGReconciler(t *testing.T) {
     if len(cfg.Services) > 0 {
         t.Error("serve config not cleaned up")
     }
-    verifyTailscaledConfig(t, fc, nil)
+    verifyTailscaledConfig(t, fc, "test-pg-second", nil)

     // Add verification that cert resources were cleaned up
     expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net")
@@ -245,7 +254,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) {
     expectReconciled(t, ingPGR, "default", "test-ingress")
     verifyServeConfig(t, fc, "svc:my-svc", false)
     verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"})
-    verifyTailscaledConfig(t, fc, []string{"svc:my-svc"})
+    verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc"})

     // Update the Ingress hostname and make sure the original Tailscale Service is deleted.
     mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) {
@@ -256,7 +265,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) {
     expectReconciled(t, ingPGR, "default", "test-ingress")
     verifyServeConfig(t, fc, "svc:updated-svc", false)
     verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"})
-    verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"})
+    verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"})

     _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc"))
     if err == nil {
@ -550,183 +559,6 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) {
	}
}
-
-func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) {
-	t.Helper()
-	tsSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName))
-	if err != nil {
-		t.Fatalf("getting Tailscale Service %q: %v", serviceName, err)
-	}
-	if tsSvc == nil {
-		t.Fatalf("Tailscale Service %q not created", serviceName)
-	}
-	gotPorts := slices.Clone(tsSvc.Ports)
-	slices.Sort(gotPorts)
-	slices.Sort(wantPorts)
-	if !slices.Equal(gotPorts, wantPorts) {
-		t.Errorf("incorrect ports for Tailscale Service %q: got %v, want %v", serviceName, gotPorts, wantPorts)
-	}
-}
-
-func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantHTTP bool) {
-	t.Helper()
-
-	cm := &corev1.ConfigMap{}
-	if err := fc.Get(context.Background(), types.NamespacedName{
-		Name:      "test-pg-ingress-config",
-		Namespace: "operator-ns",
-	}, cm); err != nil {
-		t.Fatalf("getting ConfigMap: %v", err)
-	}
-
-	cfg := &ipn.ServeConfig{}
-	if err := json.Unmarshal(cm.BinaryData["serve-config.json"], cfg); err != nil {
-		t.Fatalf("unmarshaling serve config: %v", err)
-	}
-
-	t.Logf("Looking for service %q in config: %+v", serviceName, cfg)
-
-	svc := cfg.Services[tailcfg.ServiceName(serviceName)]
-	if svc == nil {
-		t.Fatalf("service %q not found in serve config, services: %+v", serviceName, maps.Keys(cfg.Services))
-	}
-
-	wantHandlers := 1
-	if wantHTTP {
-		wantHandlers = 2
-	}
-
-	// Check TCP handlers
-	if len(svc.TCP) != wantHandlers {
-		t.Errorf("incorrect number of TCP handlers for service %q: got %d, want %d", serviceName, len(svc.TCP), wantHandlers)
-	}
-	if wantHTTP {
-		if h, ok := svc.TCP[uint16(80)]; !ok {
-			t.Errorf("HTTP (port 80) handler not found for service %q", serviceName)
-		} else if !h.HTTP {
-			t.Errorf("HTTP not enabled for port 80 handler for service %q", serviceName)
-		}
-	}
-	if h, ok := svc.TCP[uint16(443)]; !ok {
-		t.Errorf("HTTPS (port 443) handler not found for service %q", serviceName)
-	} else if !h.HTTPS {
-		t.Errorf("HTTPS not enabled for port 443 handler for service %q", serviceName)
-	}
-
-	// Check Web handlers
-	if len(svc.Web) != wantHandlers {
-		t.Errorf("incorrect number of Web handlers for service %q: got %d, want %d", serviceName, len(svc.Web), wantHandlers)
-	}
-}
-
-func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) {
-	t.Helper()
-	var expected string
-	if expectedServices != nil && len(expectedServices) > 0 {
-		expectedServicesJSON, err := json.Marshal(expectedServices)
-		if err != nil {
-			t.Fatalf("marshaling expected services: %v", err)
-		}
-		expected = fmt.Sprintf(`,"AdvertiseServices":%s`, expectedServicesJSON)
-	}
-	expectEqual(t, fc, &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      pgConfigSecretName("test-pg", 0),
-			Namespace: "operator-ns",
-			Labels:    pgSecretLabels("test-pg", "config"),
-		},
-		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)),
-		},
-	})
-}
-
-func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) {
-	tsIngressClass := &networkingv1.IngressClass{
-		ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
-		Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
-	}
-
-	// Pre-create the ProxyGroup
-	pg := &tsapi.ProxyGroup{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       "test-pg",
-			Generation: 1,
-		},
-		Spec: tsapi.ProxyGroupSpec{
-			Type: tsapi.ProxyGroupTypeIngress,
-		},
-	}
-
-	// Pre-create the ConfigMap for the ProxyGroup
-	pgConfigMap := &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test-pg-ingress-config",
-			Namespace: "operator-ns",
-		},
-		BinaryData: map[string][]byte{
-			"serve-config.json": []byte(`{"Services":{}}`),
-		},
-	}
-
-	// Pre-create a config Secret for the ProxyGroup
-	pgCfgSecret := &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      pgConfigSecretName("test-pg", 0),
-			Namespace: "operator-ns",
-			Labels:    pgSecretLabels("test-pg", "config"),
-		},
-		Data: map[string][]byte{
-			tsoperator.TailscaledConfigFileName(106): []byte("{}"),
-		},
-	}
-
-	fc := fake.NewClientBuilder().
-		WithScheme(tsapi.GlobalScheme).
-		WithObjects(pg, pgCfgSecret, pgConfigMap, tsIngressClass).
-		WithStatusSubresource(pg).
-		Build()
-
-	// Set ProxyGroup status to ready
-	pg.Status.Conditions = []metav1.Condition{
-		{
-			Type:               string(tsapi.ProxyGroupReady),
-			Status:             metav1.ConditionTrue,
-			ObservedGeneration: 1,
-		},
-	}
-	if err := fc.Status().Update(context.Background(), pg); err != nil {
-		t.Fatal(err)
-	}
-	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
-
-	ft := &fakeTSClient{}
-	zl, err := zap.NewDevelopment()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	lc := &fakeLocalClient{
-		status: &ipnstate.Status{
-			CurrentTailnet: &ipnstate.TailnetStatus{
-				MagicDNSSuffix: "ts.net",
-			},
-		},
-	}
-
-	ingPGR := &HAIngressReconciler{
-		Client:      fc,
-		tsClient:    ft,
-		defaultTags: []string{"tag:k8s"},
-		tsNamespace: "operator-ns",
-		tsnetServer: fakeTsnetServer,
-		logger:      zl.Sugar(),
-		recorder:    record.NewFakeRecorder(10),
-		lc:          lc,
-	}
-
-	return ingPGR, fc, ft
-}
-
func TestIngressPGReconciler_MultiCluster(t *testing.T) {
	ingPGR, fc, ft := setupIngressTest(t)
	ingPGR.operatorID = "operator-1"
@ -837,3 +669,187 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error {
	})
	return err
}
+
+func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) {
+	t.Helper()
+	tsSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName))
+	if err != nil {
+		t.Fatalf("getting Tailscale Service %q: %v", serviceName, err)
+	}
+	if tsSvc == nil {
+		t.Fatalf("Tailscale Service %q not created", serviceName)
+	}
+	gotPorts := slices.Clone(tsSvc.Ports)
+	slices.Sort(gotPorts)
+	slices.Sort(wantPorts)
+	if !slices.Equal(gotPorts, wantPorts) {
+		t.Errorf("incorrect ports for Tailscale Service %q: got %v, want %v", serviceName, gotPorts, wantPorts)
+	}
+}
+
+func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantHTTP bool) {
+	t.Helper()
+
+	cm := &corev1.ConfigMap{}
+	if err := fc.Get(context.Background(), types.NamespacedName{
+		Name:      "test-pg-ingress-config",
+		Namespace: "operator-ns",
+	}, cm); err != nil {
+		t.Fatalf("getting ConfigMap: %v", err)
+	}
+
+	cfg := &ipn.ServeConfig{}
+	if err := json.Unmarshal(cm.BinaryData["serve-config.json"], cfg); err != nil {
+		t.Fatalf("unmarshaling serve config: %v", err)
+	}
+
+	t.Logf("Looking for service %q in config: %+v", serviceName, cfg)
+
+	svc := cfg.Services[tailcfg.ServiceName(serviceName)]
+	if svc == nil {
+		t.Fatalf("service %q not found in serve config, services: %+v", serviceName, maps.Keys(cfg.Services))
+	}
+
+	wantHandlers := 1
+	if wantHTTP {
+		wantHandlers = 2
+	}
+
+	// Check TCP handlers
+	if len(svc.TCP) != wantHandlers {
+		t.Errorf("incorrect number of TCP handlers for service %q: got %d, want %d", serviceName, len(svc.TCP), wantHandlers)
+	}
+	if wantHTTP {
+		if h, ok := svc.TCP[uint16(80)]; !ok {
+			t.Errorf("HTTP (port 80) handler not found for service %q", serviceName)
+		} else if !h.HTTP {
+			t.Errorf("HTTP not enabled for port 80 handler for service %q", serviceName)
+		}
+	}
+	if h, ok := svc.TCP[uint16(443)]; !ok {
+		t.Errorf("HTTPS (port 443) handler not found for service %q", serviceName)
+	} else if !h.HTTPS {
+		t.Errorf("HTTPS not enabled for port 443 handler for service %q", serviceName)
+	}
+
+	// Check Web handlers
+	if len(svc.Web) != wantHandlers {
+		t.Errorf("incorrect number of Web handlers for service %q: got %d, want %d", serviceName, len(svc.Web), wantHandlers)
+	}
+}
+
+func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expectedServices []string) {
+	t.Helper()
+	var expected string
+	if expectedServices != nil && len(expectedServices) > 0 {
+		expectedServicesJSON, err := json.Marshal(expectedServices)
+		if err != nil {
+			t.Fatalf("marshaling expected services: %v", err)
+		}
+		expected = fmt.Sprintf(`,"AdvertiseServices":%s`, expectedServicesJSON)
+	}
+	expectEqual(t, fc, &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      pgConfigSecretName(pgName, 0),
+			Namespace: "operator-ns",
+			Labels:    pgSecretLabels(pgName, "config"),
+		},
+		Data: map[string][]byte{
+			tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)),
+		},
+	})
+}
+
+func createPGResources(t *testing.T, fc client.Client, pgName string) {
+	t.Helper()
+	// Pre-create the ProxyGroup
+	pg := &tsapi.ProxyGroup{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       pgName,
+			Generation: 1,
+		},
+		Spec: tsapi.ProxyGroupSpec{
+			Type: tsapi.ProxyGroupTypeIngress,
+		},
+	}
+	mustCreate(t, fc, pg)
+
+	// Pre-create the ConfigMap for the ProxyGroup
+	pgConfigMap := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-ingress-config", pgName),
+			Namespace: "operator-ns",
+		},
+		BinaryData: map[string][]byte{
+			"serve-config.json": []byte(`{"Services":{}}`),
+		},
+	}
+	mustCreate(t, fc, pgConfigMap)
+
+	// Pre-create a config Secret for the ProxyGroup
+	pgCfgSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      pgConfigSecretName(pgName, 0),
+			Namespace: "operator-ns",
+			Labels:    pgSecretLabels(pgName, "config"),
+		},
+		Data: map[string][]byte{
+			tsoperator.TailscaledConfigFileName(106): []byte("{}"),
+		},
+	}
+	mustCreate(t, fc, pgCfgSecret)
+	pg.Status.Conditions = []metav1.Condition{
+		{
+			Type:               string(tsapi.ProxyGroupReady),
+			Status:             metav1.ConditionTrue,
+			ObservedGeneration: 1,
+		},
+	}
+	if err := fc.Status().Update(context.Background(), pg); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) {
+	tsIngressClass := &networkingv1.IngressClass{
+		ObjectMeta: metav1.ObjectMeta{Name: "tailscale"},
+		Spec:       networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"},
+	}
+
+	fc := fake.NewClientBuilder().
+		WithScheme(tsapi.GlobalScheme).
+		WithObjects(tsIngressClass).
+		WithStatusSubresource(&tsapi.ProxyGroup{}).
+		Build()
+
+	createPGResources(t, fc, "test-pg")
+
+	fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
+
+	ft := &fakeTSClient{}
+	zl, err := zap.NewDevelopment()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	lc := &fakeLocalClient{
+		status: &ipnstate.Status{
+			CurrentTailnet: &ipnstate.TailnetStatus{
+				MagicDNSSuffix: "ts.net",
+			},
+		},
+	}
+
+	ingPGR := &HAIngressReconciler{
+		Client:      fc,
+		tsClient:    ft,
+		defaultTags: []string{"tag:k8s"},
+		tsNamespace: "operator-ns",
+		tsnetServer: fakeTsnetServer,
+		logger:      zl.Sugar(),
+		recorder:    record.NewFakeRecorder(10),
+		lc:          lc,
+	}
+
+	return ingPGR, fc, ft
+}
@ -46,7 +46,7 @@ func TestServicePGReconciler(t *testing.T) {

		config = append(config, fmt.Sprintf("svc:default-%s", svc.Name))
		verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"})
-		verifyTailscaledConfig(t, fc, config)
+		verifyTailscaledConfig(t, fc, "test-pg", config)
	}

	for i, svc := range svcs {
@ -75,7 +75,7 @@ func TestServicePGReconciler(t *testing.T) {
		}

		config = removeEl(config, fmt.Sprintf("svc:default-%s", svc.Name))
-		verifyTailscaledConfig(t, fc, config)
+		verifyTailscaledConfig(t, fc, "test-pg", config)
	}
}

@ -88,7 +88,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) {
	expectReconciled(t, svcPGR, "default", svc.Name)

	verifyTailscaleService(t, ft, fmt.Sprintf("svc:default-%s", svc.Name), []string{"do-not-validate"})
-	verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:default-%s", svc.Name)})
+	verifyTailscaledConfig(t, fc, "test-pg", []string{fmt.Sprintf("svc:default-%s", svc.Name)})

	hostname := "foobarbaz"
	mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) {
@ -100,7 +100,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) {
	expectReconciled(t, svcPGR, "default", svc.Name)

	verifyTailscaleService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"})
-	verifyTailscaledConfig(t, fc, []string{fmt.Sprintf("svc:%s", hostname)})
+	verifyTailscaledConfig(t, fc, "test-pg", []string{fmt.Sprintf("svc:%s", hostname)})

	_, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(fmt.Sprintf("svc:default-%s", svc.Name)))
	if err == nil {
@ -334,7 +334,7 @@ func TestIgnoreRegularService(t *testing.T) {
	mustCreate(t, fc, svc)
	expectReconciled(t, pgr, "default", "test")

-	verifyTailscaledConfig(t, fc, nil)
+	verifyTailscaledConfig(t, fc, "test-pg", nil)

	tsSvcs, err := ft.ListVIPServices(context.Background())
	if err == nil {
@ -54,6 +54,7 @@ func main() {
	hostname        = fs.String("hostname", "", "Hostname to register the service under")
	siteID          = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration")
	v4PfxStr        = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise")
+	dnsServers      = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)")
	verboseTSNet    = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet")
	printULA        = fs.Bool("print-ula", false, "print the ULA prefix and exit")
	ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore")
@ -78,7 +79,7 @@ func main() {
	}

	var ignoreDstTable *bart.Table[bool]
-	for _, s := range strings.Split(*ignoreDstPfxStr, ",") {
+	for s := range strings.SplitSeq(*ignoreDstPfxStr, ",") {
		s := strings.TrimSpace(s)
		if s == "" {
			continue
@ -185,11 +186,37 @@ func main() {
		ipPool:   ipp,
		routes:   routes,
		dnsAddr:  dnsAddr,
-		resolver: net.DefaultResolver,
+		resolver: getResolver(*dnsServers),
	}
	c.run(ctx, lc)
}

+// getResolver parses serverFlag and returns either the default resolver, or a
+// resolver that uses the provided comma-separated DNS server AddrPort's, or
+// panics.
+func getResolver(serverFlag string) lookupNetIPer {
+	if serverFlag == "" {
+		return net.DefaultResolver
+	}
+	var addrs []string
+	for s := range strings.SplitSeq(serverFlag, ",") {
+		s = strings.TrimSpace(s)
+		addr, err := netip.ParseAddrPort(s)
+		if err != nil {
+			log.Fatalf("dns server provided: %q does not parse: %v", s, err)
+		}
+		addrs = append(addrs, addr.String())
+	}
+	return &net.Resolver{
+		PreferGo: true,
+		Dial: func(ctx context.Context, network string, address string) (net.Conn, error) {
+			var dialer net.Dialer
+			// TODO(raggi): perhaps something other than random?
+			return dialer.DialContext(ctx, network, addrs[rand.N(len(addrs))])
+		},
+	}
+}
+
func calculateAddresses(prefixes []netip.Prefix) (*netipx.IPSet, netip.Addr, *netipx.IPSet) {
	var ipsb netipx.IPSetBuilder
	for _, p := range prefixes {
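A quick sanity-check of what this flag wires up. The following is a standalone sketch, not the natc code itself: buildResolver re-implements the getResolver logic above against the standard library only (the repo's lookupNetIPer interface is omitted), and the main function, flag value, and looked-up hostname are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"
	"math/rand/v2"
	"net"
	"net/netip"
	"strings"
)

// buildResolver mirrors getResolver above: empty input means the system
// resolver; otherwise every entry must parse as a host:port AddrPort, and
// each query dials one configured server at random.
func buildResolver(serverFlag string) *net.Resolver {
	if serverFlag == "" {
		return net.DefaultResolver
	}
	var addrs []string
	for s := range strings.SplitSeq(serverFlag, ",") {
		addr, err := netip.ParseAddrPort(strings.TrimSpace(s))
		if err != nil {
			log.Fatalf("bad dns server %q: %v", s, err)
		}
		addrs = append(addrs, addr.String())
	}
	return &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			var d net.Dialer
			// Pick one configured server at random per query.
			return d.DialContext(ctx, network, addrs[rand.N(len(addrs))])
		},
	}
}

func main() {
	r := buildResolver("1.1.1.1:53, 8.8.8.8:53") // illustrative servers
	ips, err := r.LookupNetIP(context.Background(), "ip4", "tailscale.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ips)
}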
@ -9,6 +9,7 @@ import (
	"io"
	"net"
	"net/netip"
+	"sync"
	"testing"
	"time"

@ -480,3 +481,198 @@ func TestV6V4(t *testing.T) {
		}
	}
}
+
+// echoServer is a simple server that just echos back data set to it.
+type echoServer struct {
+	listener net.Listener
+	addr     string
+	wg       sync.WaitGroup
+	done     chan struct{}
+}
+
+// newEchoServer creates a new test DNS server on the specified network and address
+func newEchoServer(t *testing.T, network, addr string) *echoServer {
+	listener, err := net.Listen(network, addr)
+	if err != nil {
+		t.Fatalf("Failed to create test DNS server: %v", err)
+	}
+
+	server := &echoServer{
+		listener: listener,
+		addr:     listener.Addr().String(),
+		done:     make(chan struct{}),
+	}
+
+	server.wg.Add(1)
+	go server.serve()
+
+	return server
+}
+
+func (s *echoServer) serve() {
+	defer s.wg.Done()
+
+	for {
+		select {
+		case <-s.done:
+			return
+		default:
+			conn, err := s.listener.Accept()
+			if err != nil {
+				select {
+				case <-s.done:
+					return
+				default:
+					continue
+				}
+			}
+			go s.handleConnection(conn)
+		}
+	}
+}
+
+func (s *echoServer) handleConnection(conn net.Conn) {
+	defer conn.Close()
+	// Simple response - just echo back some data to confirm connectivity
+	buf := make([]byte, 1024)
+	n, err := conn.Read(buf)
+	if err != nil {
+		return
+	}
+	conn.Write(buf[:n])
+}
+
+func (s *echoServer) close() {
+	close(s.done)
+	s.listener.Close()
+	s.wg.Wait()
+}
+
+func TestGetResolver(t *testing.T) {
+	tests := []struct {
+		name    string
+		network string
+		addr    string
+	}{
+		{
+			name:    "ipv4_loopback",
+			network: "tcp4",
+			addr:    "127.0.0.1:0",
+		},
+		{
+			name:    "ipv6_loopback",
+			network: "tcp6",
+			addr:    "[::1]:0",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			server := newEchoServer(t, tc.network, tc.addr)
+			defer server.close()
+			serverAddr := server.addr
+			resolver := getResolver(serverAddr)
+			if resolver == nil {
+				t.Fatal("getResolver returned nil")
+			}
+
+			netResolver, ok := resolver.(*net.Resolver)
+			if !ok {
+				t.Fatal("getResolver did not return a *net.Resolver")
+			}
+			if netResolver.Dial == nil {
+				t.Fatal("resolver.Dial is nil")
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			conn, err := netResolver.Dial(ctx, "tcp", "dummy.address:53")
+			if err != nil {
+				t.Fatalf("Failed to dial test DNS server: %v", err)
+			}
+			defer conn.Close()

+			testData := []byte("test")
+			_, err = conn.Write(testData)
+			if err != nil {
+				t.Fatalf("Failed to write to connection: %v", err)
+			}
+
+			response := make([]byte, len(testData))
+			_, err = conn.Read(response)
+			if err != nil {
+				t.Fatalf("Failed to read from connection: %v", err)
+			}
+
+			if string(response) != string(testData) {
+				t.Fatalf("Expected echo response %q, got %q", testData, response)
+			}
+		})
+	}
+}
+
+func TestGetResolverMultipleServers(t *testing.T) {
+	server1 := newEchoServer(t, "tcp4", "127.0.0.1:0")
+	defer server1.close()
+	server2 := newEchoServer(t, "tcp4", "127.0.0.1:0")
+	defer server2.close()
+	serverFlag := server1.addr + ", " + server2.addr
+
+	resolver := getResolver(serverFlag)
+	netResolver, ok := resolver.(*net.Resolver)
+	if !ok {
+		t.Fatal("getResolver did not return a *net.Resolver")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	servers := map[string]bool{
+		server1.addr: false,
+		server2.addr: false,
+	}
+
+	// Try up to 1000 times to hit all servers, this should be very quick, and
+	// if this fails randomness has regressed beyond reason.
+	for range 1000 {
+		conn, err := netResolver.Dial(ctx, "tcp", "dummy.address:53")
+		if err != nil {
+			t.Fatalf("Failed to dial test DNS server: %v", err)
+		}
+
+		remoteAddr := conn.RemoteAddr().String()
+
+		conn.Close()
+
+		servers[remoteAddr] = true
+
+		var allDone = true
+		for _, done := range servers {
+			if !done {
+				allDone = false
+				break
+			}
+		}
+		if allDone {
+			break
+		}
+	}
+
+	var allDone = true
+	for _, done := range servers {
+		if !done {
+			allDone = false
+			break
+		}
+	}
+	if !allDone {
+		t.Errorf("after 1000 queries, not all servers were hit, significant lack of randomness: %#v", servers)
+	}
+}
+
+func TestGetResolverEmpty(t *testing.T) {
+	resolver := getResolver("")
+	if resolver != net.DefaultResolver {
+		t.Fatal(`getResolver("") should return net.DefaultResolver`)
+	}
+}
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
+
package main

import (
@ -791,21 +791,14 @@ func runDaemonLogs(ctx context.Context, args []string) error {
}

func runDaemonBusEvents(ctx context.Context, args []string) error {
-	logs, err := localClient.StreamBusEvents(ctx)
-	if err != nil {
-		return err
-	}
-	defer logs.Close()
-	d := json.NewDecoder(bufio.NewReader(logs))
-	for {
-		var line eventbus.DebugEvent
-		err := d.Decode(&line)
+	for line, err := range localClient.StreamBusEvents(ctx) {
		if err != nil {
			return err
		}
		fmt.Printf("[%d][%q][from: %q][to: %q] %s\n", line.Count, line.Type,
			line.From, line.To, line.Event)
	}
+	return nil
}

var metricsArgs struct {
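The rewritten loop suggests StreamBusEvents now yields (event, error) pairs as a Go range-over-func iterator instead of returning an io.ReadCloser of JSON lines. A self-contained sketch of consuming such a stream follows; the iter.Seq2 signature and the DebugEvent field set are assumptions inferred from the loop above, and the producer is a stand-in.

package main

import (
	"fmt"
	"iter"
)

// DebugEvent mirrors the fields the CLI prints; the real type lives in
// tailscale.com/util/eventbus (assumed here).
type DebugEvent struct {
	Count    int
	Type     string
	From, To string
	Event    string
}

// streamEvents stands in for localClient.StreamBusEvents, which the new code
// appears to expose as an iter.Seq2 of (event, error) pairs (an assumption).
func streamEvents() iter.Seq2[DebugEvent, error] {
	return func(yield func(DebugEvent, error) bool) {
		for i := range 3 {
			// A real implementation would decode events off the wire and
			// yield any transport error as the second value.
			if !yield(DebugEvent{Count: i, Type: "demo"}, nil) {
				return
			}
		}
	}
}

func main() {
	for ev, err := range streamEvents() {
		if err != nil {
			fmt.Println("stream error:", err)
			return
		}
		fmt.Printf("[%d][%q] %s\n", ev.Count, ev.Type, ev.Event)
	}
}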
@ -326,6 +326,9 @@ func runNetworkLockRemove(ctx context.Context, args []string) error {
	if !st.Enabled {
		return errors.New("tailnet lock is not enabled")
	}
+	if len(st.TrustedKeys) == 1 {
+		return errors.New("cannot remove the last trusted signing key; use 'tailscale lock disable' to disable tailnet lock instead, or add another signing key before removing one")
+	}

	if nlRemoveArgs.resign {
		// Validate we are not removing trust in ourselves while resigning. This is because
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
+
package main

import (
2 go.mod
@ -1,6 +1,6 @@
module tailscale.com

-go 1.24.0
+go 1.24.4

require (
	filippo.io/mkcert v1.4.4
@ -98,6 +98,7 @@ import (
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/deephash"
	"tailscale.com/util/dnsname"
+	"tailscale.com/util/eventbus"
	"tailscale.com/util/goroutines"
	"tailscale.com/util/httpm"
	"tailscale.com/util/mak"
@ -168,6 +169,17 @@ type watchSession struct {

var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")

+var (
+	// errShutdown indicates that the [LocalBackend.Shutdown] was called.
+	errShutdown = errors.New("shutting down")
+
+	// errNodeContextChanged indicates that [LocalBackend] has switched
+	// to a different [localNodeContext], usually due to a profile change.
+	// It is used as a context cancellation cause for the old context
+	// and can be returned when an operation is performed on it.
+	errNodeContextChanged = errors.New("profile changed")
+)
+
// LocalBackend is the glue between the major pieces of the Tailscale
// network software: the cloud control plane (via controlclient), the
// network data plane (via wgengine), and the user-facing UIs and CLIs
@ -180,11 +192,11 @@ var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detecte
// state machine generates events back out to zero or more components.
type LocalBackend struct {
	// Elements that are thread-safe or constant after construction.
	ctx       context.Context    // canceled by [LocalBackend.Shutdown]
-	ctxCancel context.CancelFunc // cancels ctx
+	ctxCancel context.CancelCauseFunc // cancels ctx
	logf      logger.Logf // general logging
	keyLogf   logger.Logf // for printing list of peers on change
	statsLogf logger.Logf // for printing peers stats on change
	sys       *tsd.System
	health    *health.Tracker // always non-nil
	metrics   metrics
@ -463,7 +475,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo

	envknob.LogCurrent(logf)

-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancelCause(context.Background())
	clock := tstime.StdClock{}

	// Until we transition to a Running state, use a canceled context for
@ -503,7 +515,10 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
		captiveCancel:         nil, // so that we start checkCaptivePortalLoop when Running
		needsCaptiveDetection: make(chan bool),
	}
-	b.currentNodeAtomic.Store(newNodeBackend())
+	nb := newNodeBackend(ctx, b.sys.Bus.Get())
+	b.currentNodeAtomic.Store(nb)
+	nb.ready()

	mConn.SetNetInfoCallback(b.setNetInfo)

	if sys.InitialConfig != nil {
@ -585,9 +600,18 @@ func (b *LocalBackend) currentNode() *nodeBackend {
	if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() {
		return v
	}
-	// Auto-init one in tests for LocalBackend created without the NewLocalBackend constructor...
-	v := newNodeBackend()
-	b.currentNodeAtomic.CompareAndSwap(nil, v)
+	// Auto-init [nodeBackend] in tests for LocalBackend created without the
+	// NewLocalBackend() constructor. Same reasoning for checking b.sys.
+	var bus *eventbus.Bus
+	if b.sys == nil {
+		bus = eventbus.New()
+	} else {
+		bus = b.sys.Bus.Get()
+	}
+	v := newNodeBackend(cmp.Or(b.ctx, context.Background()), bus)
+	if b.currentNodeAtomic.CompareAndSwap(nil, v) {
+		v.ready()
+	}
	return b.currentNodeAtomic.Load()
}

@ -1089,8 +1113,9 @@ func (b *LocalBackend) Shutdown() {
	if cc != nil {
		cc.Shutdown()
	}
+	b.ctxCancel(errShutdown)
+	b.currentNode().shutdown(errShutdown)
	extHost.Shutdown()
-	b.ctxCancel()
	b.e.Close()
	<-b.e.Done()
	b.awaitNoGoroutinesInTest()
@ -6992,7 +7017,11 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err
		// down, so no need to do any work.
		return nil
	}
-	b.currentNodeAtomic.Store(newNodeBackend())
+	newNode := newNodeBackend(b.ctx, b.sys.Bus.Get())
+	if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil {
+		oldNode.shutdown(errNodeContextChanged)
+	}
+	defer newNode.ready()
	b.setNetMapLocked(nil) // Reset netmap.
	b.updateFilterLocked(ipn.PrefsView{})
	// Reset the NetworkMap in the engine
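The move from context.WithCancel to context.WithCancelCause is what lets Shutdown and profile switches record why a context ended, so callers can distinguish errShutdown from errNodeContextChanged via context.Cause. A minimal standard-library demonstration of those semantics (the error value here is illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errShutdown = errors.New("shutting down")

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with an explicit cause, as LocalBackend.Shutdown now does.
	cancel(errShutdown)

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // shutting down
}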
@ -5,6 +5,7 @@ package ipnlocal

import (
	"cmp"
+	"context"
	"net/netip"
	"slices"
	"sync"
@ -22,9 +23,11 @@ import (
	"tailscale.com/types/ptr"
	"tailscale.com/types/views"
	"tailscale.com/util/dnsname"
+	"tailscale.com/util/eventbus"
	"tailscale.com/util/mak"
	"tailscale.com/util/slicesx"
	"tailscale.com/wgengine/filter"
+	"tailscale.com/wgengine/magicsock"
)

// nodeBackend is node-specific [LocalBackend] state. It is usually the current node.
@ -39,7 +42,7 @@ import (
// Two pointers to different [nodeBackend] instances represent different local nodes.
// However, there's currently a bug where a new [nodeBackend] might not be created
// during an implicit node switch (see tailscale/corp#28014).
+//
// In the future, we might want to include at least the following in this struct (in addition to the current fields).
// However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions,
// peer API handlers, etc.).
@ -61,13 +64,24 @@ import (
// Even if they're tied to the local node, instead of moving them here, we should extract the entire feature
// into a separate package and have it install proper hooks.
type nodeBackend struct {
+	ctx       context.Context         // canceled by [nodeBackend.shutdown]
+	ctxCancel context.CancelCauseFunc // cancels ctx
+
	// filterAtomic is a stateful packet filter. Immutable once created, but can be
	// replaced with a new one.
	filterAtomic atomic.Pointer[filter.Filter]

+	// initialized once and immutable
+	eventClient   *eventbus.Client
+	filterUpdates *eventbus.Publisher[magicsock.FilterUpdate]
+	nodeUpdates   *eventbus.Publisher[magicsock.NodeAddrsHostInfoUpdate]
+
	// TODO(nickkhyl): maybe use sync.RWMutex?
	mu sync.Mutex // protects the following fields

+	shutdownOnce sync.Once     // guards calling [nodeBackend.shutdown]
+	readyCh      chan struct{} // closed by [nodeBackend.ready]; nil after shutdown
+
	// NetMap is the most recently set full netmap from the controlclient.
	// It can't be mutated in place once set. Because it can't be mutated in place,
	// delta updates from the control server don't apply to it. Instead, use
@ -88,12 +102,28 @@ type nodeBackend struct {
	nodeByAddr map[netip.Addr]tailcfg.NodeID
}

-func newNodeBackend() *nodeBackend {
-	cn := &nodeBackend{}
+func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend {
+	ctx, ctxCancel := context.WithCancelCause(ctx)
+	nb := &nodeBackend{
+		ctx:         ctx,
+		ctxCancel:   ctxCancel,
+		eventClient: bus.Client("ipnlocal.nodeBackend"),
+		readyCh:     make(chan struct{}),
+	}
	// Default filter blocks everything and logs nothing.
	noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{})
-	cn.filterAtomic.Store(noneFilter)
-	return cn
+	nb.filterAtomic.Store(noneFilter)
+	nb.filterUpdates = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient)
+	nb.nodeUpdates = eventbus.Publish[magicsock.NodeAddrsHostInfoUpdate](nb.eventClient)
+	nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()})
+	return nb
+}
+
+// Context returns a context that is canceled when the [nodeBackend] shuts down,
+// either because [LocalBackend] is switching to a different [nodeBackend]
+// or is shutting down itself.
+func (nb *nodeBackend) Context() context.Context {
+	return nb.ctx
}

func (nb *nodeBackend) Self() tailcfg.NodeView {
@ -399,9 +429,16 @@ func (nb *nodeBackend) updatePeersLocked() {
		nb.peers[k] = tailcfg.NodeView{}
	}

+	changed := magicsock.NodeAddrsHostInfoUpdate{
+		Complete: true,
+	}
	// Second pass, add everything wanted.
	for _, p := range nm.Peers {
		mak.Set(&nb.peers, p.ID(), p)
+		mak.Set(&changed.NodesByID, p.ID(), magicsock.NodeAddrsHostInfo{
+			Addresses: p.Addresses(),
+			Hostinfo:  p.Hostinfo(),
+		})
	}

	// Third pass, remove deleted things.
@ -410,6 +447,7 @@ func (nb *nodeBackend) updatePeersLocked() {
			delete(nb.peers, k)
		}
	}
+	nb.nodeUpdates.Publish(changed)
}

func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
@ -424,6 +462,9 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo
	// call (e.g. its endpoints + online status both change)
	var mutableNodes map[tailcfg.NodeID]*tailcfg.Node

+	changed := magicsock.NodeAddrsHostInfoUpdate{
+		Complete: false,
+	}
	for _, m := range muts {
		n, ok := mutableNodes[m.NodeIDBeingMutated()]
		if !ok {
@ -438,8 +479,14 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo
		m.Apply(n)
	}
	for nid, n := range mutableNodes {
-		nb.peers[nid] = n.View()
+		nv := n.View()
+		nb.peers[nid] = nv
+		mak.Set(&changed.NodesByID, nid, magicsock.NodeAddrsHostInfo{
+			Addresses: nv.Addresses(),
+			Hostinfo:  nv.Hostinfo(),
+		})
	}
+	nb.nodeUpdates.Publish(changed)
	return true
}

@ -461,6 +508,7 @@ func (nb *nodeBackend) filter() *filter.Filter {

func (nb *nodeBackend) setFilter(f *filter.Filter) {
	nb.filterAtomic.Store(f)
+	nb.filterUpdates.Publish(magicsock.FilterUpdate{Filter: f})
}

func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
@ -475,6 +523,60 @@ func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (doh
	return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID)
}

+// ready signals that [LocalBackend] has completed the switch to this [nodeBackend]
+// and any pending calls to [nodeBackend.Wait] must be unblocked.
+func (nb *nodeBackend) ready() {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	if nb.readyCh != nil {
+		close(nb.readyCh)
+	}
+}
+
+// Wait blocks until [LocalBackend] completes the switch to this [nodeBackend]
+// and calls [nodeBackend.ready]. It returns an error if the provided context
+// is canceled or if the [nodeBackend] shuts down or is already shut down.
+//
+// It must not be called with the [LocalBackend]'s internal mutex held as [LocalBackend]
+// may need to acquire it to complete the switch.
+//
+// TODO(nickkhyl): Relax this restriction once [LocalBackend]'s state machine
+// runs in its own goroutine, or if we decide that waiting for the state machine
+// restart to finish isn't necessary for [LocalBackend] to consider the switch complete.
+// We mostly need this because of [LocalBackend.Start] acquiring b.mu and the fact that
+// methods like [LocalBackend.SwitchProfile] must report any errors returned by it.
+// Perhaps we could report those errors asynchronously as [health.Warnable]s?
+func (nb *nodeBackend) Wait(ctx context.Context) error {
+	nb.mu.Lock()
+	readyCh := nb.readyCh
+	nb.mu.Unlock()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-nb.ctx.Done():
+		return context.Cause(nb.ctx)
+	case <-readyCh:
+		return nil
+	}
+}
+
+// shutdown shuts down the [nodeBackend] and cancels its context
+// with the provided cause.
+func (nb *nodeBackend) shutdown(cause error) {
+	nb.shutdownOnce.Do(func() {
+		nb.doShutdown(cause)
+	})
+}
+
+func (nb *nodeBackend) doShutdown(cause error) {
+	nb.mu.Lock()
+	defer nb.mu.Unlock()
+	nb.ctxCancel(cause)
+	nb.readyCh = nil
+	nb.eventClient.Close()
+}
+
// dnsConfigForNetmap returns a *dns.Config for the given netmap,
// prefs, client OS version, and cloud hosting environment.
//
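ready, Wait, and shutdown together form a small readiness gate around each nodeBackend. Below is a stripped-down, standalone re-implementation of that pattern to show the intended semantics in isolation; the names, the shutdown causes, and the whole gate type are illustrative, and the real struct carries much more state.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// gate re-implements the ready/Wait/shutdown pattern the diff adds to
// nodeBackend: Wait blocks until ready is called, the parent context ends,
// or shutdown records a cause.
type gate struct {
	ctx       context.Context
	ctxCancel context.CancelCauseFunc

	mu      sync.Mutex
	once    sync.Once
	readyCh chan struct{} // closed by ready; nil after shutdown
}

func newGate(parent context.Context) *gate {
	ctx, cancel := context.WithCancelCause(parent)
	return &gate{ctx: ctx, ctxCancel: cancel, readyCh: make(chan struct{})}
}

func (g *gate) ready() {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.readyCh != nil {
		close(g.readyCh)
	}
}

func (g *gate) shutdown(cause error) {
	g.once.Do(func() {
		g.mu.Lock()
		defer g.mu.Unlock()
		g.ctxCancel(cause)
		g.readyCh = nil // a nil channel blocks forever in wait's select
	})
}

func (g *gate) wait(ctx context.Context) error {
	g.mu.Lock()
	readyCh := g.readyCh
	g.mu.Unlock()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-g.ctx.Done():
		return context.Cause(g.ctx) // reports the shutdown cause
	case <-readyCh:
		return nil
	}
}

func main() {
	g := newGate(context.Background())
	go g.ready() // the "switch completed" signal
	fmt.Println(g.wait(context.Background())) // <nil>

	g.shutdown(errors.New("profile changed"))
	fmt.Println(g.wait(context.Background())) // profile changed
}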
123 ipn/ipnlocal/node_backend_test.go (Normal file)
@ -0,0 +1,123 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ipnlocal
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"tailscale.com/util/eventbus"
+)
+
+func TestNodeBackendReadiness(t *testing.T) {
+	nb := newNodeBackend(t.Context(), eventbus.New())
+
+	// The node backend is not ready until [nodeBackend.ready] is called,
+	// and [nodeBackend.Wait] should fail with [context.DeadlineExceeded].
+	ctx, cancelCtx := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancelCtx()
+	if err := nb.Wait(ctx); err != ctx.Err() {
+		t.Fatalf("Wait: got %v; want %v", err, ctx.Err())
+	}
+
+	// Start a goroutine to wait for the node backend to become ready.
+	waitDone := make(chan struct{})
+	go func() {
+		if err := nb.Wait(context.Background()); err != nil {
+			t.Errorf("Wait: got %v; want nil", err)
+		}
+		close(waitDone)
+	}()
+
+	// Call [nodeBackend.ready] to indicate that the node backend is now ready.
+	go nb.ready()
+
+	// Once the backend is called, [nodeBackend.Wait] should return immediately without error.
+	if err := nb.Wait(context.Background()); err != nil {
+		t.Fatalf("Wait: got %v; want nil", err)
+	}
+	// And any pending waiters should also be unblocked.
+	<-waitDone
+}
+
+func TestNodeBackendShutdown(t *testing.T) {
+	nb := newNodeBackend(t.Context(), eventbus.New())
+
+	shutdownCause := errors.New("test shutdown")
+
+	// Start a goroutine to wait for the node backend to become ready.
+	// This test expects it to block until the node backend shuts down
+	// and then return the specified shutdown cause.
+	waitDone := make(chan struct{})
+	go func() {
+		if err := nb.Wait(context.Background()); err != shutdownCause {
+			t.Errorf("Wait: got %v; want %v", err, shutdownCause)
+		}
+		close(waitDone)
+	}()
+
+	// Call [nodeBackend.shutdown] to indicate that the node backend is shutting down.
+	nb.shutdown(shutdownCause)
+
+	// Calling it again is fine, but should not change the shutdown cause.
+	nb.shutdown(errors.New("test shutdown again"))
+
+	// After shutdown, [nodeBackend.Wait] should return with the specified shutdown cause.
+	if err := nb.Wait(context.Background()); err != shutdownCause {
+		t.Fatalf("Wait: got %v; want %v", err, shutdownCause)
+	}
+	// The context associated with the node backend should also be cancelled
+	// and its cancellation cause should match the shutdown cause.
+	if err := nb.Context().Err(); !errors.Is(err, context.Canceled) {
+		t.Fatalf("Context.Err: got %v; want %v", err, context.Canceled)
+	}
+	if cause := context.Cause(nb.Context()); cause != shutdownCause {
+		t.Fatalf("Cause: got %v; want %v", cause, shutdownCause)
+	}
+	// And any pending waiters should also be unblocked.
+	<-waitDone
+}
+
+func TestNodeBackendReadyAfterShutdown(t *testing.T) {
+	nb := newNodeBackend(t.Context(), eventbus.New())
+
+	shutdownCause := errors.New("test shutdown")
+	nb.shutdown(shutdownCause)
+	nb.ready() // Calling ready after shutdown is a no-op, but should not panic, etc.
+	if err := nb.Wait(context.Background()); err != shutdownCause {
+		t.Fatalf("Wait: got %v; want %v", err, shutdownCause)
+	}
+}
+
+func TestNodeBackendParentContextCancellation(t *testing.T) {
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	nb := newNodeBackend(ctx, eventbus.New())
+
+	cancelCtx()
+
+	// Cancelling the parent context should cause [nodeBackend.Wait]
+	// to return with [context.Canceled].
+	if err := nb.Wait(context.Background()); !errors.Is(err, context.Canceled) {
+		t.Fatalf("Wait: got %v; want %v", err, context.Canceled)
+	}
+
+	// And the node backend's context should also be cancelled.
+	if err := nb.Context().Err(); !errors.Is(err, context.Canceled) {
+		t.Fatalf("Context.Err: got %v; want %v", err, context.Canceled)
+	}
+}
+
+func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) {
+	nb := newNodeBackend(t.Context(), eventbus.New())
+
+	// Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently
+	// should not cause issues, and [nodeBackend.Wait] should unblock,
+	// but the result of [nodeBackend.Wait] is intentionally undefined.
+	go nb.ready()
+	go nb.shutdown(errors.New("test shutdown"))
+
+	nb.Wait(context.Background())
+}
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
+
package ipn

import (
117 license_test.go (Normal file)
@ -0,0 +1,117 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package tailscaleroot
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"tailscale.com/util/set"
+)
+
+func normalizeLineEndings(b []byte) []byte {
+	return bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n"))
+}
+
+// TestLicenseHeaders checks that all Go files in the tree
+// directory tree have a correct-looking Tailscale license header.
+func TestLicenseHeaders(t *testing.T) {
+	want := normalizeLineEndings([]byte(strings.TrimLeft(`
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+`, "\n")))
+
+	exceptions := set.Of(
+		// Subprocess test harness code
+		"util/winutil/testdata/testrestartableprocesses/main.go",
+		"util/winutil/subprocess_windows_test.go",
+
+		// WireGuard copyright
+		"cmd/tailscale/cli/authenticode_windows.go",
+		"wgengine/router/ifconfig_windows.go",
+
+		// noiseexplorer.com copyright
+		"control/controlbase/noiseexplorer_test.go",
+
+		// Generated eBPF management code
+		"derp/xdp/bpf_bpfeb.go",
+		"derp/xdp/bpf_bpfel.go",
+
+		// Generated kube deepcopy funcs file starts with a Go build tag + an empty line
+		"k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go",
+	)
+
+	err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("path %s: %v", path, err)
+		}
+		if exceptions.Contains(filepath.ToSlash(path)) {
+			return nil
+		}
+		base := filepath.Base(path)
+		switch base {
+		case ".git", "node_modules", "tempfork":
+			return filepath.SkipDir
+		}
+		switch base {
+		case "zsyscall_windows.go":
+			// Generated code.
+			return nil
+		}
+
+		if strings.HasSuffix(base, ".config.ts") {
+			return nil
+		}
+		if strings.HasSuffix(base, "_string.go") {
+			// Generated file from go:generate stringer
+			return nil
+		}
+
+		ext := filepath.Ext(base)
+		switch ext {
+		default:
+			return nil
+		case ".go", ".ts", ".tsx":
+		}
+
+		buf := make([]byte, 512)
+		f, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		if n, err := io.ReadAtLeast(f, buf, 512); err != nil && err != io.ErrUnexpectedEOF {
+			return err
+		} else {
+			buf = buf[:n]
+		}
+
+		buf = normalizeLineEndings(buf)
+
+		bufNoTrunc := buf
+		if i := bytes.Index(buf, []byte("\npackage ")); i != -1 {
+			buf = buf[:i]
+		}
+
+		if bytes.Contains(buf, want) {
+			return nil
+		}
+
+		if bytes.Contains(bufNoTrunc, []byte("BSD-3-Clause\npackage ")) {
+			t.Errorf("file %s has license header as a package doc; add a blank line before the package line", path)
+			return nil
+		}
+
+		t.Errorf("file %s is missing Tailscale copyright header:\n\n%s", path, want)
+		return nil
+	})
+	if err != nil {
+		t.Fatalf("Walk: %v", err)
+	}
+}
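The heart of TestLicenseHeaders, reduced to a standalone sketch: read the first 512 bytes of a file, normalize CRLF line endings, and look for the exact two-line header. The path passed in main is illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

var want = []byte("// Copyright (c) Tailscale Inc & AUTHORS\n// SPDX-License-Identifier: BSD-3-Clause\n")

// hasHeader reports whether the first 512 bytes of the file contain the
// expected license header, tolerating CRLF line endings as the test does.
func hasHeader(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	buf := make([]byte, 512)
	n, err := io.ReadAtLeast(f, buf, 512)
	if err != nil && err != io.ErrUnexpectedEOF {
		return false, err
	}
	buf = bytes.ReplaceAll(buf[:n], []byte("\r\n"), []byte("\n"))
	return bytes.Contains(buf, want), nil
}

func main() {
	ok, err := hasHeader("main.go") // illustrative path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("header present:", ok)
}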
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
+
package tstun

import (
@ -1,77 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) Tailscale Inc & AUTHORS
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# check_license_headers.sh checks that all Go files in the given
-# directory tree have a correct-looking Tailscale license header.
-
-check_file() {
-	got=$1
-
-	want=$(cat <<EOF
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-EOF
-)
-	if [ "$got" = "$want" ]; then
-		return 0
-	fi
-	return 1
-}
-
-if [ $# != 1 ]; then
-	echo "Usage: $0 rootdir" >&2
-	exit 1
-fi
-
-fail=0
-for file in $(find $1 \( -name '*.go' -or -name '*.tsx' -or -name '*.ts' -not -name '*.config.ts' \) -not -path '*/.git/*' -not -path '*/node_modules/*'); do
-	case $file in
-		$1/tempfork/*)
-			# Skip, tempfork of third-party code
-			;;
-		$1/wgengine/router/ifconfig_windows.go)
-			# WireGuard copyright.
-			;;
-		$1/cmd/tailscale/cli/authenticode_windows.go)
-			# WireGuard copyright.
-			;;
-		*_string.go)
-			# Generated file from go:generate stringer
-			;;
-		$1/control/controlbase/noiseexplorer_test.go)
-			# Noiseexplorer.com copyright.
-			;;
-		*/zsyscall_windows.go)
-			# Generated syscall wrappers
-			;;
-		$1/util/winutil/subprocess_windows_test.go)
-			# Subprocess test harness code
-			;;
-		$1/util/winutil/testdata/testrestartableprocesses/main.go)
-			# Subprocess test harness code
-			;;
-		*$1/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go)
-			# Generated kube deepcopy funcs file starts with a Go build tag + an empty line
-			header="$(head -5 $file | tail -n+3 )"
-			;;
-		$1/derp/xdp/bpf_bpfe*.go)
-			# Generated eBPF management code
-			;;
-		*)
-			header="$(head -2 $file)"
-			;;
-	esac
-	if [ ! -z "$header" ]; then
-		if ! check_file "$header"; then
-			fail=1
-			echo "${file#$1/} doesn't have the right copyright header:"
-			echo "$header" | sed -e 's/^/ /g'
-		fi
-	fi
-done
-
-if [ $fail -ne 0 ]; then
-	exit 1
-fi
@@ -5,6 +5,7 @@ package tka

 import (
 	"crypto/ed25519"
+	"strings"
 	"testing"

 	"github.com/google/go-cmp/cmp"
@@ -90,6 +91,20 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) {
 	if _, err := a.state.GetKey(key2.MustID()); err != ErrNoSuchKey {
 		t.Errorf("GetKey(key2).err = %v, want %v", err, ErrNoSuchKey)
 	}
+
+	// Check that removing the remaining key errors out.
+	b = a.NewUpdater(signer25519(priv))
+	if err := b.RemoveKey(key.MustID()); err != nil {
+		t.Fatalf("RemoveKey(%v) failed: %v", key, err)
+	}
+	updates, err = b.Finalize(storage)
+	if err != nil {
+		t.Fatalf("Finalize() failed: %v", err)
+	}
+	wantErr := "cannot remove the last key"
+	if err := a.Inform(storage, updates); err == nil || !strings.Contains(err.Error(), wantErr) {
+		t.Fatalf("expected Inform() to return error %q, got: %v", wantErr, err)
+	}
 }

 func TestAuthorityBuilderSetKeyVote(t *testing.T) {
@@ -440,6 +440,13 @@ func aumVerify(aum AUM, state State, isGenesisAUM bool) error {
 			return fmt.Errorf("signature %d: %v", i, err)
 		}
 	}
+
+	if aum.MessageKind == AUMRemoveKey && len(state.Keys) == 1 {
+		if kid, err := state.Keys[0].ID(); err == nil && bytes.Equal(aum.KeyID, kid) {
+			return errors.New("cannot remove the last key in the state")
+		}
+	}
+
 	return nil
 }
@@ -87,29 +87,29 @@ func (a *authorization) Refresh(ctx context.Context) error {
 }

 func (a *authorization) AllowsHost(addr netip.Addr) bool {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 	if a.peers == nil {
 		return false
 	}
-	a.mu.Lock()
-	defer a.mu.Unlock()
 	return a.peers.addrs.Contains(addr)
 }

 func (a *authorization) SelfAllowed() bool {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 	if a.peers == nil {
 		return false
 	}
-	a.mu.Lock()
-	defer a.mu.Unlock()
 	return a.peers.status.Self.Tags != nil && views.SliceContains(*a.peers.status.Self.Tags, a.tag)
 }

 func (a *authorization) AllowedPeers() views.Slice[*ipnstate.PeerStatus] {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 	if a.peers == nil {
 		return views.Slice[*ipnstate.PeerStatus]{}
 	}
-	a.mu.Lock()
-	defer a.mu.Unlock()
 	return views.SliceOf(a.peers.statuses)
 }

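The reordering above is a lock-ordering fix: a.peers is guarded by a.mu, so testing it for nil before acquiring the lock was an unsynchronized read that the race detector can flag. The general pattern, as a standalone sketch with hypothetical names (not code from this commit):

import "sync"

type peerSet struct{} // stand-in for the guarded peer state

type guarded struct {
	mu    sync.Mutex
	peers *peerSet // guarded by mu; nil until the first refresh
}

// ready takes the lock before reading peers, so the nil check and any
// subsequent reads happen inside one critical section.
func (g *guarded) ready() bool {
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.peers != nil
}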
@@ -1,5 +1,6 @@
 // Copyright (c) Tailscale Inc & AUTHORS
 // SPDX-License-Identifier: BSD-3-Clause
+
 package promvarz

 import (
@@ -113,15 +113,16 @@ func (c *Client) shouldPublish(t reflect.Type) bool {
 // Subscribe requests delivery of events of type T through the given
 // Queue. Panics if the queue already has a subscriber for T.
 func Subscribe[T any](c *Client) *Subscriber[T] {
-	return newSubscriber[T](c.subscribeState())
+	r := c.subscribeState()
+	s := newSubscriber[T](r)
+	r.addSubscriber(s)
+	return s
 }

 // Publisher returns a publisher for event type T using the given
 // client.
 func Publish[T any](c *Client) *Publisher[T] {
-	ret := newPublisher[T](c)
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.pub.Add(ret)
-	return ret
+	p := newPublisher[T](c)
+	c.addPublisher(p)
+	return p
 }
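Subscribe and Publish are the package's typed entry points; the refactor moves registration out of the constructors and into these functions. A rough caller-side sketch follows; the import path, the eventbus.New constructor, and the Publisher's Publish method are assumptions, since this commit does not show them. The Events/Done/Close subscriber methods match the usage visible in the NewConn hunk further down.

package main

import "tailscale.com/util/eventbus" // assumed import path

type TickEvent struct{ N int }

func main() {
	bus := eventbus.New() // assumed constructor
	client := bus.Client("example")

	pub := eventbus.Publish[TickEvent](client)
	sub := eventbus.Subscribe[TickEvent](client)

	go func() {
		defer sub.Close()
		for {
			select {
			case evt := <-sub.Events():
				_ = evt.N // handle the event
			case <-sub.Done():
				return
			}
		}
	}()

	pub.Publish(TickEvent{N: 1}) // assumed method name
}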
@@ -21,11 +21,7 @@ type Publisher[T any] struct {
 }

 func newPublisher[T any](c *Client) *Publisher[T] {
-	ret := &Publisher[T]{
-		client: c,
-	}
-	c.addPublisher(ret)
-	return ret
+	return &Publisher[T]{client: c}
 }

 // Close closes the publisher.
@@ -91,7 +91,7 @@ func (q *subscribeState) pump(ctx context.Context) {
 			}
 		} else {
 			// Keep the cases in this select in sync with
-			// Subscriber.dispatch below. The only different should be
+			// Subscriber.dispatch below. The only difference should be
 			// that this select doesn't deliver queued values to
 			// anyone, and unconditionally accepts new values.
 			select {
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) {
|
func (s *subscribeState) addSubscriber(sub subscriber) {
|
||||||
s.outputsMu.Lock()
|
s.outputsMu.Lock()
|
||||||
defer s.outputsMu.Unlock()
|
defer s.outputsMu.Unlock()
|
||||||
|
t := sub.subscribeType()
|
||||||
if s.outputs[t] != nil {
|
if s.outputs[t] != nil {
|
||||||
panic(fmt.Errorf("double subscription for event %s", t))
|
panic(fmt.Errorf("double subscription for event %s", t))
|
||||||
}
|
}
|
||||||
@ -183,15 +184,10 @@ type Subscriber[T any] struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newSubscriber[T any](r *subscribeState) *Subscriber[T] {
|
func newSubscriber[T any](r *subscribeState) *Subscriber[T] {
|
||||||
t := reflect.TypeFor[T]()
|
return &Subscriber[T]{
|
||||||
|
|
||||||
ret := &Subscriber[T]{
|
|
||||||
read: make(chan T),
|
read: make(chan T),
|
||||||
unregister: func() { r.deleteSubscriber(t) },
|
unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) },
|
||||||
}
|
}
|
||||||
r.addSubscriber(t, ret)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] {
|
func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] {
|
||||||
|
@@ -23,3 +23,11 @@ func Get[T any](v T, err error) T {
 	}
 	return v
 }
+
+// Get2 returns v1 and v2 as is. It panics if err is non-nil.
+func Get2[T any, U any](v1 T, v2 U, err error) (T, U) {
+	if err != nil {
+		panic(err)
+	}
+	return v1, v2
+}
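A quick caller-side sketch for the new two-value variant, useful where a (value, value, error) return should simply panic on error, e.g. in tests or package init. The package name and import path are assumptions; the hunk header does not show them.

import (
	"net"

	"tailscale.com/util/must" // assumed location of Get/Get2
)

// net.SplitHostPort returns (string, string, error), the exact shape
// Get2 is made for.
var host, port = must.Get2(net.SplitHostPort("127.0.0.1:80"))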
@@ -63,6 +63,7 @@ import (
 	"tailscale.com/util/set"
 	"tailscale.com/util/testenv"
 	"tailscale.com/util/usermetric"
+	"tailscale.com/wgengine/filter"
 	"tailscale.com/wgengine/wgint"
 )

|
|||||||
return o.DERPActiveFunc
|
return o.DERPActiveFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NodeAddrsHostInfoUpdate represents an update event of the addresses and
|
||||||
|
// [tailcfg.HostInfoView] for a node set. This event is published over an
|
||||||
|
// [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as of 2025-06. If
|
||||||
|
// you are adding more subscribers consider moving this type out of magicsock.
|
||||||
|
type NodeAddrsHostInfoUpdate struct {
|
||||||
|
NodesByID map[tailcfg.NodeID]NodeAddrsHostInfo
|
||||||
|
Complete bool // true if NodesByID contains all known nodes, false if it may be a subset
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeAddrsHostInfo represents the addresses and [tailcfg.HostinfoView] for a
|
||||||
|
// Tailscale node.
|
||||||
|
type NodeAddrsHostInfo struct {
|
||||||
|
Addresses views.Slice[netip.Prefix]
|
||||||
|
Hostinfo tailcfg.HostinfoView
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterUpdate represents an update event for a [*filter.Filter]. This event is
|
||||||
|
// signaled over an [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as
|
||||||
|
// of 2025-06. If you are adding more subscribers consider moving this type out
|
||||||
|
// of magicsock.
|
||||||
|
type FilterUpdate struct {
|
||||||
|
*filter.Filter
|
||||||
|
}
|
||||||
|
|
||||||
// newConn is the error-free, network-listening-side-effect-free based
|
// newConn is the error-free, network-listening-side-effect-free based
|
||||||
// of NewConn. Mostly for tests.
|
// of NewConn. Mostly for tests.
|
||||||
func newConn(logf logger.Logf) *Conn {
|
func newConn(logf logger.Logf) *Conn {
|
||||||
@ -535,6 +560,20 @@ func newConn(logf logger.Logf) *Conn {
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// consumeEventbusTopic consumes events from sub and passes them to
|
||||||
|
// handlerFn until sub.Done() is closed.
|
||||||
|
func consumeEventbusTopic[T any](sub *eventbus.Subscriber[T], handlerFn func(t T)) {
|
||||||
|
defer sub.Close()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case evt := <-sub.Events():
|
||||||
|
handlerFn(evt)
|
||||||
|
case <-sub.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// NewConn creates a magic Conn listening on opts.Port.
|
// NewConn creates a magic Conn listening on opts.Port.
|
||||||
// As the set of possible endpoints for a Conn changes, the
|
// As the set of possible endpoints for a Conn changes, the
|
||||||
// callback opts.EndpointsFunc is called.
|
// callback opts.EndpointsFunc is called.
|
||||||
@ -562,17 +601,17 @@ func NewConn(opts Options) (*Conn, error) {
|
|||||||
c.eventClient = c.eventBus.Client("magicsock.Conn")
|
c.eventClient = c.eventBus.Client("magicsock.Conn")
|
||||||
|
|
||||||
pmSub := eventbus.Subscribe[portmapper.Mapping](c.eventClient)
|
pmSub := eventbus.Subscribe[portmapper.Mapping](c.eventClient)
|
||||||
go func() {
|
go consumeEventbusTopic(pmSub, func(_ portmapper.Mapping) {
|
||||||
defer pmSub.Close()
|
c.onPortMapChanged()
|
||||||
for {
|
})
|
||||||
select {
|
filterSub := eventbus.Subscribe[FilterUpdate](c.eventClient)
|
||||||
case <-pmSub.Events():
|
go consumeEventbusTopic(filterSub, func(t FilterUpdate) {
|
||||||
c.onPortMapChanged()
|
// TODO(jwhited): implement
|
||||||
case <-pmSub.Done():
|
})
|
||||||
return
|
nodeSub := eventbus.Subscribe[NodeAddrsHostInfoUpdate](c.eventClient)
|
||||||
}
|
go consumeEventbusTopic(nodeSub, func(t NodeAddrsHostInfoUpdate) {
|
||||||
}
|
// TODO(jwhited): implement
|
||||||
}()
|
})
|
||||||
|
|
||||||
// Disable the explicit callback from the portmapper, the subscriber handles it.
|
// Disable the explicit callback from the portmapper, the subscriber handles it.
|
||||||
onPortMapChanged = nil
|
onPortMapChanged = nil
|
||||||
@ -2798,6 +2837,10 @@ func (c *connBind) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
c.closed = true
|
c.closed = true
|
||||||
|
// Close the [eventbus.Client].
|
||||||
|
if c.eventClient != nil {
|
||||||
|
c.eventClient.Close()
|
||||||
|
}
|
||||||
// Unblock all outstanding receives.
|
// Unblock all outstanding receives.
|
||||||
c.pconn4.Close()
|
c.pconn4.Close()
|
||||||
c.pconn6.Close()
|
c.pconn6.Close()
|
||||||