Mirror of https://github.com/juanfont/headscale.git
Synced 2025-08-17 12:37:27 +00:00

Compare commits: bug_report ... v0.23.0-al (13 commits)
Commits (SHA1):
88af29d5f5
9cedc2942b
062b9a5611
887302e8f1
b1b90d165d
4ea12f472a
b4210e2c90
a369d57a17
1e22f17f36
65376e2842
7e8bf4bfe5
3b103280ef
a592ae56b4

.github/workflows/release.yml (vendored; 2 lines changed)
@@ -20,6 +20,6 @@ jobs:
       - uses: DeterminateSystems/magic-nix-cache-action@main

       - name: Run goreleaser
-        run: nix develop --command -- goreleaser release --clean
+        run: nix develop --command -- goreleaser release --clean --verbose
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/test-integration-v2-TestEnableDisableAutoApprovedRoute.yaml (vendored; new file, 67 lines)
@@ -0,0 +1,67 @@
+# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
+# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
+
+name: Integration Test v2 - TestEnableDisableAutoApprovedRoute
+
+on: [pull_request]
+
+concurrency:
+  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  TestEnableDisableAutoApprovedRoute:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - uses: DeterminateSystems/nix-installer-action@main
+      - uses: DeterminateSystems/magic-nix-cache-action@main
+      - uses: satackey/action-docker-layer-caching@main
+        continue-on-error: true
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v34
+        with:
+          files: |
+            *.nix
+            go.*
+            **/*.go
+            integration_test/
+            config-example.yaml
+
+      - name: Run TestEnableDisableAutoApprovedRoute
+        uses: Wandalen/wretry.action@master
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          attempt_limit: 5
+          command: |
+            nix develop --command -- docker run \
+              --tty --rm \
+              --volume ~/.cache/hs-integration-go:/go \
+              --name headscale-test-suite \
+              --volume $PWD:$PWD -w $PWD/integration \
+              --volume /var/run/docker.sock:/var/run/docker.sock \
+              --volume $PWD/control_logs:/tmp/control \
+              golang:1 \
+              go run gotest.tools/gotestsum@latest -- ./... \
+                -failfast \
+                -timeout 120m \
+                -parallel 1 \
+                -run "^TestEnableDisableAutoApprovedRoute$"
+
+      - uses: actions/upload-artifact@v3
+        if: always() && steps.changed-files.outputs.any_changed == 'true'
+        with:
+          name: logs
+          path: "control_logs/*.log"
+
+      - uses: actions/upload-artifact@v3
+        if: always() && steps.changed-files.outputs.any_changed == 'true'
+        with:
+          name: pprof
+          path: "control_logs/*.pprof.tar"
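
These per-test workflow files are generated rather than hand-written; the header comment points at cmd/gh-action-integration-generator/main.go, which is not included in this diff. As an illustration only, a trimmed-down Go sketch of that templating approach might look like the following; the test list, template text, and output paths here are assumptions, not the real generator:

package main

import (
    "fmt"
    "os"
    "text/template"
)

// Hypothetical, trimmed-down workflow generator: one YAML file per
// integration test, all produced from a single shared template.
var tests = []string{
    "TestEnableDisableAutoApprovedRoute",
    "TestNodeAdvertiseTagNoACLCommand",
    "TestNodeAdvertiseTagWithACLCommand",
    "TestSubnetRouteACL",
}

const workflowTmpl = `# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
name: Integration Test v2 - {{.Name}}
on: [pull_request]
jobs:
  {{.Name}}:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Run {{.Name}}
        run: go test ./... -run "^{{.Name}}$"
`

func main() {
    tmpl := template.Must(template.New("workflow").Parse(workflowTmpl))
    for _, name := range tests {
        // Assumes it is run from the repository root so the directory exists.
        path := fmt.Sprintf(".github/workflows/test-integration-v2-%s.yaml", name)
        f, err := os.Create(path)
        if err != nil {
            panic(err)
        }
        if err := tmpl.Execute(f, struct{ Name string }{name}); err != nil {
            panic(err)
        }
        f.Close()
    }
}

The real generator emits the full 67-line workflow shown above; the template here is shortened only to keep the sketch readable.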

.github/workflows/test-integration-v2-TestNodeAdvertiseTagNoACLCommand.yaml (vendored; new file, 67 lines)
Generated from the same template as the workflow above; the only difference is that TestNodeAdvertiseTagNoACLCommand is the test name throughout (workflow name, job name, run step, and -run pattern).

.github/workflows/test-integration-v2-TestNodeAdvertiseTagWithACLCommand.yaml (vendored; new file, 67 lines)
Generated from the same template as the first workflow above, with TestNodeAdvertiseTagWithACLCommand as the test name throughout.

.github/workflows/test-integration-v2-TestSubnetRouteACL.yaml (vendored; new file, 67 lines)
Generated from the same template as the first workflow above, with TestSubnetRouteACL as the test name throughout.

.goreleaser.yml
@@ -9,20 +9,20 @@ release:

 builds:
   - id: headscale
-    main: ./cmd/headscale/headscale.go
+    main: ./cmd/headscale
     mod_timestamp: "{{ .CommitTimestamp }}"
     env:
       - CGO_ENABLED=0
     targets:
-      - darwin_amd64
-      - darwin_arm64
-      - freebsd_amd64
-      - linux_386
+      # - darwin_amd64
+      # - darwin_arm64
+      # - freebsd_amd64
+      # - linux_386
       - linux_amd64
-      - linux_arm64
-      - linux_arm_5
-      - linux_arm_6
-      - linux_arm_7
+      # - linux_arm64
+      # - linux_arm_5
+      # - linux_arm_6
+      # - linux_arm_7
     flags:
       - -mod=readonly
     ldflags:

@@ -63,7 +63,6 @@ nfpms:
    bindir: /usr/bin
    formats:
      - deb
-     # - rpm
    contents:
      - src: ./config-example.yaml
        dst: /etc/headscale/config.yaml

@@ -80,6 +79,43 @@ nfpms:
    postinstall: ./docs/packaging/postinstall.sh
    postremove: ./docs/packaging/postremove.sh

+kos:
+  - id: ghcr
+    build: headscale
+    main: ./cmd/headscale
+    base_image: gcr.io/distroless/base-debian11
+    repository: ghcr.io/juanfont/headscale
+    env:
+      - CGO_ENABLED=0
+    platforms:
+      - linux/amd64
+      # - linux/386
+      # - linux/arm64
+      # - linux/arm/v7
+      # - linux/arm/v6
+      # - linux/arm/v5
+    tags:
+      - latest
+      - '{{.Tag}}'
+      - '{{ .Major }}.{{ .Minor }}'
+      - '{{ .Major }}'
+      - '{{ if not .Prerelease }}stable{{ end }}'
+
+  # - id: dockerhub
+  #   build: headscale
+  #   base_image: gcr.io/distroless/base-debian11
+  #   repository: headscale/headscale
+  #   platforms:
+  #     - linux/amd64
+  #     - linux/386
+  #     - linux/arm64
+  #     - linux/arm/v7
+  #     - linux/arm/v6
+  #     - linux/arm/v5
+  #   tags:
+  #     - latest
+  #     - '{{.Tag}}'
+
 checksum:
   name_template: "checksums.txt"
 snapshot:

CHANGELOG.md
@@ -26,7 +26,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
 - Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
 - API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
 - Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
-  - The latest supported client is 1.32
+  - The latest supported client is 1.36
 - Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
   - If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url.
 - Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)

@@ -43,6 +43,7 @@ Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) tak
 Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
 Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
 Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
+Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565)

 ## 0.22.3 (2023-05-12)

@@ -1,4 +1,7 @@
-# Builder image
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
+
 FROM docker.io/golang:1.21-bookworm AS build
 ARG VERSION=dev
 ENV GOPATH /go

@@ -1,4 +1,7 @@
-# Builder image
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
+
 FROM docker.io/golang:1.21-bookworm AS build
 ARG VERSION=dev
 ENV GOPATH /go

@@ -1,3 +1,7 @@
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
+
 FROM golang:latest

 RUN apt-get update \

config-example.yaml
@@ -94,6 +94,16 @@ derp:
   #
   private_key_path: /var/lib/headscale/derp_server_private.key

+  # This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
+  # it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
+  # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
+  automatically_add_embedded_derp_region: true
+
+  # For better connection stability (especially when using an Exit-Node and DNS is not working),
+  # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
+  ipv4: 1.2.3.4
+  ipv6: 2001:db8::1

   # List of externally available DERP maps encoded in JSON
   urls:
     - https://controlplane.tailscale.com/derpmap/default
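
The new automatically_add_embedded_derp_region flag implies that, when it is disabled, the embedded DERP server has to be described by hand in a file referenced from derp.paths. As a rough illustration of what such an entry contains, the sketch below builds a single custom region with tailscale's tailcfg types and prints it as YAML. Treat it purely as an illustration: the host name, addresses, and region ID are made up, and whether a given headscale version accepts exactly this YAML shape for derp.paths is an assumption to verify against the documentation.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
    "tailscale.com/tailcfg"
)

func main() {
    // One custom region with a single DERP node, roughly what the embedded
    // server would otherwise add automatically (values are illustrative).
    dm := tailcfg.DERPMap{
        Regions: map[int]*tailcfg.DERPRegion{
            999: {
                RegionID:   999,
                RegionCode: "custom",
                RegionName: "My Embedded DERP",
                Nodes: []*tailcfg.DERPNode{
                    {
                        Name:     "999a",
                        RegionID: 999,
                        HostName: "derp.example.com",
                        IPv4:     "1.2.3.4",
                        IPv6:     "2001:db8::1",
                        DERPPort: 443,
                        STUNPort: 3478,
                    },
                },
            },
        },
    }

    out, err := yaml.Marshal(dm)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
}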

flake.nix
@@ -95,6 +95,7 @@
            gotestsum
            gotests
            ksh
+           ko

            # 'dot' is needed for pprof graphs
            # go tool pprof -http=: <source>

@@ -268,7 +268,7 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
        case <-ticker.C:
            log.Info().Msg("Fetching DERPMap updates")
            h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
-           if h.cfg.DERP.ServerEnabled {
+           if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
                region, _ := h.DERPServer.GenerateRegion()
                h.DERPMap.Regions[region.RegionID] = &region
            }

@@ -501,7 +501,9 @@ func (h *Headscale) Serve() error {
            return err
        }

-       h.DERPMap.Regions[region.RegionID] = &region
+       if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
+           h.DERPMap.Regions[region.RegionID] = &region
+       }

        go h.DERPServer.ServeSTUN()
    }

@@ -340,6 +340,16 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
        )
    }

+   node.Expiry = &expiry
+
+   stateSelfUpdate := types.StateUpdate{
+       Type:        types.StateSelfUpdate,
+       ChangeNodes: types.Nodes{node},
+   }
+   if stateSelfUpdate.Valid() {
+       hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
+   }
+
    stateUpdate := types.StateUpdate{
        Type: types.StatePeerChangedPatch,
        ChangePatches: []*tailcfg.PeerChange{

@@ -350,7 +360,7 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
        },
    }
    if stateUpdate.Valid() {
-       hsdb.notifier.NotifyAll(stateUpdate)
+       hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
    }

    return nil

@@ -729,6 +739,19 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
            stateUpdate, node.MachineKey.String())
    }

+   // Send an update to the node itself with to ensure it
+   // has an updated packetfilter allowing the new route
+   // if it is defined in the ACL.
+   selfUpdate := types.StateUpdate{
+       Type:        types.StateSelfUpdate,
+       ChangeNodes: types.Nodes{node},
+   }
+   if selfUpdate.Valid() {
+       hsdb.notifier.NotifyByMachineKey(
+           selfUpdate,
+           node.MachineKey)
+   }
+
    return nil
 }

@@ -856,7 +879,7 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
    // checked everything.
    started := time.Now()

-   expired := make([]*tailcfg.PeerChange, 0)
+   expiredNodes := make([]*types.Node, 0)

    nodes, err := hsdb.listNodes()
    if err != nil {

@@ -872,17 +895,13 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
            // It will notify about all nodes that has been expired.
            // It should only notify about expired nodes since _last check_.
            node.Expiry.After(lastCheck) {
-           expired = append(expired, &tailcfg.PeerChange{
-               NodeID:    tailcfg.NodeID(node.ID),
-               KeyExpiry: node.Expiry,
-           })
-
-           now := time.Now()
+           expiredNodes = append(expiredNodes, &nodes[index])
+
            // Do not use setNodeExpiry as that has a notifier hook, which
            // can cause a deadlock, we are updating all changed nodes later
            // and there is no point in notifiying twice.
-           if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
-               Expiry: &now,
+           if err := hsdb.db.Model(&nodes[index]).Updates(types.Node{
+               Expiry: &started,
            }).Error; err != nil {
                log.Error().
                    Err(err).

@@ -898,6 +917,15 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
        }
    }

+   expired := make([]*tailcfg.PeerChange, len(expiredNodes))
+   for idx, node := range expiredNodes {
+       expired[idx] = &tailcfg.PeerChange{
+           NodeID:    tailcfg.NodeID(node.ID),
+           KeyExpiry: &started,
+       }
+   }
+
+   // Inform the peers of a node with a lightweight update.
    stateUpdate := types.StateUpdate{
        Type:          types.StatePeerChangedPatch,
        ChangePatches: expired,

@@ -906,5 +934,16 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
        hsdb.notifier.NotifyAll(stateUpdate)
    }

+   // Inform the node itself that it has expired.
+   for _, node := range expiredNodes {
+       stateSelfUpdate := types.StateUpdate{
+           Type:        types.StateSelfUpdate,
+           ChangeNodes: types.Nodes{node},
+       }
+       if stateSelfUpdate.Valid() {
+           hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
+       }
+   }
+
    return started
 }

@@ -349,7 +349,7 @@ func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, er

 // SaveNodeRoutes takes a node and updates the database with
 // the new routes.
-// It returns a bool wheter an update should be sent as the
+// It returns a bool whether an update should be sent as the
 // saved route impacts nodes.
 func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
    hsdb.mu.Lock()

@@ -585,6 +585,10 @@ func (hsdb *HSDatabase) failoverRoute(r *types.Route) ([]key.MachinePublic, erro
            continue
        }

+       if !route.Enabled {
+           continue
+       }
+
        if hsdb.notifier.IsConnected(route.Node.MachineKey) {
            newPrimary = &routes[idx]
            break

@@ -639,13 +643,19 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
    aclPolicy *policy.ACLPolicy,
    node *types.Node,
 ) error {
-   hsdb.mu.Lock()
-   defer hsdb.mu.Unlock()
+   if len(aclPolicy.AutoApprovers.ExitNode) == 0 && len(aclPolicy.AutoApprovers.Routes) == 0 {
+       // No autoapprovers configured
+       return nil
+   }

    if len(node.IPAddresses) == 0 {
-       return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
+       // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
+       return nil
    }

+   hsdb.mu.Lock()
+   defer hsdb.mu.Unlock()
+
    routes, err := hsdb.getNodeAdvertisedRoutes(node)
    if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
        log.Error().

@@ -657,6 +667,8 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
        return err
    }

+   log.Trace().Interface("routes", routes).Msg("routes for autoapproving")
+
    approvedRoutes := types.Routes{}

    for _, advertisedRoute := range routes {

@@ -676,6 +688,13 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
            return err
        }

+       log.Trace().
+           Str("node", node.Hostname).
+           Str("user", node.User.Name).
+           Strs("routeApprovers", routeApprovers).
+           Str("prefix", netip.Prefix(advertisedRoute.Prefix).String()).
+           Msg("looking up route for autoapproving")
+
        for _, approvedAlias := range routeApprovers {
            if approvedAlias == node.User.Name {
                approvedRoutes = append(approvedRoutes, advertisedRoute)

TestFailoverRoute: seventeen one-line hunks (@@ -371, -382, -392, -411, -422, -432, -448, -459, -469, -479, -498, -509, -520, -536, -547, -558 and -568) each add "Enabled: true," to an existing test route, directly after its IsPrimary field. The final hunk adds a new test case:

@@ -576,6 +593,47 @@ func TestFailoverRoute(t *testing.T) {
            },
            wantErr: false,
        },
+       {
+           name: "failover-primary-none-enabled",
+           failingRoute: types.Route{
+               Model: gorm.Model{
+                   ID: 1,
+               },
+               Prefix: ipp("10.0.0.0/24"),
+               Node: types.Node{
+                   MachineKey: machineKeys[0],
+               },
+               IsPrimary: true,
+               Enabled:   true,
+           },
+           routes: types.Routes{
+               types.Route{
+                   Model: gorm.Model{
+                       ID: 1,
+                   },
+                   Prefix: ipp("10.0.0.0/24"),
+                   Node: types.Node{
+                       MachineKey: machineKeys[0],
+                   },
+                   IsPrimary: true,
+                   Enabled:   true,
+               },
+               // not enabled
+               types.Route{
+                   Model: gorm.Model{
+                       ID: 2,
+                   },
+                   Prefix: ipp("10.0.0.0/24"),
+                   Node: types.Node{
+                       MachineKey: machineKeys[1],
+                   },
+                   IsPrimary: false,
+                   Enabled:   false,
+               },
+           },
+           want:    nil,
+           wantErr: false,
+       },
    }

    for _, tt := range tests {

@@ -84,6 +84,8 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
                RegionID: d.cfg.ServerRegionID,
                HostName: host,
                DERPPort: port,
+               IPv4:     d.cfg.IPv4,
+               IPv6:     d.cfg.IPv6,
            },
        },
    }

@@ -99,6 +101,7 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
    localDERPregion.Nodes[0].STUNPort = portSTUN

    log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
+   log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0])

    return localDERPregion, nil
 }

@@ -208,6 +211,7 @@ func DERPProbeHandler(
 // The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
 // They have a cache, but not clear if that is really necessary at Headscale, uh, scale.
 // An example implementation is found here https://derp.tailscale.com/bootstrap-dns
+// Coordination server is included automatically, since local DERP is using the same DNS Name in d.serverURL
 func DERPBootstrapDNSHandler(
    derpMap *tailcfg.DERPMap,
 ) func(http.ResponseWriter, *http.Request) {
|
|||||||
"github.com/juanfont/headscale/hscontrol/util"
|
"github.com/juanfont/headscale/hscontrol/util"
|
||||||
"github.com/klauspost/compress/zstd"
|
"github.com/klauspost/compress/zstd"
|
||||||
"github.com/rs/zerolog/log"
|
"github.com/rs/zerolog/log"
|
||||||
"github.com/samber/lo"
|
|
||||||
"golang.org/x/exp/maps"
|
"golang.org/x/exp/maps"
|
||||||
"tailscale.com/envknob"
|
"tailscale.com/envknob"
|
||||||
"tailscale.com/smallzstd"
|
"tailscale.com/smallzstd"
|
||||||
@@ -279,6 +278,18 @@ func (m *Mapper) LiteMapResponse(
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
|
||||||
|
pol,
|
||||||
|
node,
|
||||||
|
nodeMapToList(m.peers),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.PacketFilter = policy.ReduceFilterRules(node, rules)
|
||||||
|
resp.SSHPolicy = sshPolicy
|
||||||
|
|
||||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -459,6 +470,8 @@ func (m *Mapper) marshalMapResponse(
|
|||||||
switch {
|
switch {
|
||||||
case resp.Peers != nil && len(resp.Peers) > 0:
|
case resp.Peers != nil && len(resp.Peers) > 0:
|
||||||
responseType = "full"
|
responseType = "full"
|
||||||
|
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
|
||||||
|
responseType = "lite"
|
||||||
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
|
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
|
||||||
responseType = "changed"
|
responseType = "changed"
|
||||||
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
|
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
|
||||||
@@ -593,15 +606,6 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
|
|
||||||
return lo.Filter(peers, func(item *types.Node, index int) bool {
|
|
||||||
// Filter out nodes that are expired OR
|
|
||||||
// nodes that has no endpoints, this typically means they have
|
|
||||||
// registered, but are not configured.
|
|
||||||
return !item.IsExpired() || len(item.Endpoints) > 0
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendPeerChanges mutates a tailcfg.MapResponse with all the
|
// appendPeerChanges mutates a tailcfg.MapResponse with all the
|
||||||
// necessary changes when peers have changed.
|
// necessary changes when peers have changed.
|
||||||
func appendPeerChanges(
|
func appendPeerChanges(
|
||||||
@@ -627,9 +631,6 @@ func appendPeerChanges(
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter out peers that have expired.
|
|
||||||
changed = filterExpiredAndNotReady(changed)
|
|
||||||
|
|
||||||
// If there are filter rules present, see if there are any nodes that cannot
|
// If there are filter rules present, see if there are any nodes that cannot
|
||||||
// access eachother at all and remove them from the peers.
|
// access eachother at all and remove them from the peers.
|
||||||
if len(rules) > 0 {
|
if len(rules) > 0 {
|
||||||
|

@@ -95,6 +95,21 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string)
        }
    }
 }

+func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) {
+   log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
+   defer log.Trace().
+       Caller().
+       Interface("type", update.Type).
+       Msg("releasing lock, finished notifing")
+
+   n.l.RLock()
+   defer n.l.RUnlock()
+
+   if c, ok := n.nodes[mKey.String()]; ok {
+       c <- update
+   }
+}
+
 func (n *Notifier) String() string {
    n.l.RLock()
    defer n.l.RUnlock()

@@ -250,6 +250,21 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
            if node.IPAddresses.InIPSet(expanded) {
                dests = append(dests, dest)
            }
+
+           // If the node exposes routes, ensure they are note removed
+           // when the filters are reduced.
+           if node.Hostinfo != nil {
+               // TODO(kradalby): Evaluate if we should only keep
+               // the routes if the route is enabled. This will
+               // require database access in this part of the code.
+               if len(node.Hostinfo.RoutableIPs) > 0 {
+                   for _, routableIP := range node.Hostinfo.RoutableIPs {
+                       if expanded.ContainsPrefix(routableIP) {
+                           dests = append(dests, dest)
+                       }
+                   }
+               }
+           }
        }

        if len(dests) > 0 {

@@ -674,14 +689,18 @@ func expandOwnersFromTag(
    pol *ACLPolicy,
    tag string,
 ) ([]string, error) {
+   noTagErr := fmt.Errorf(
+       "%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
+       ErrInvalidTag,
+       tag,
+   )
+   if pol == nil {
+       return []string{}, noTagErr
+   }
    var owners []string
    ows, ok := pol.TagOwners[tag]
    if !ok {
-       return []string{}, fmt.Errorf(
-           "%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
-           ErrInvalidTag,
-           tag,
-       )
+       return []string{}, noTagErr
    }
    for _, owner := range ows {
        if isGroup(owner) {

@@ -1901,6 +1901,81 @@ func TestReduceFilterRules(t *testing.T) {
            },
            want: []tailcfg.FilterRule{},
        },
+       {
+           name: "1604-subnet-routers-are-preserved",
+           pol: ACLPolicy{
+               Groups: Groups{
+                   "group:admins": {"user1"},
+               },
+               ACLs: []ACL{
+                   {
+                       Action:       "accept",
+                       Sources:      []string{"group:admins"},
+                       Destinations: []string{"group:admins:*"},
+                   },
+                   {
+                       Action:       "accept",
+                       Sources:      []string{"group:admins"},
+                       Destinations: []string{"10.33.0.0/16:*"},
+                   },
+               },
+           },
+           node: &types.Node{
+               IPAddresses: types.NodeAddresses{
+                   netip.MustParseAddr("100.64.0.1"),
+                   netip.MustParseAddr("fd7a:115c:a1e0::1"),
+               },
+               User: types.User{Name: "user1"},
+               Hostinfo: &tailcfg.Hostinfo{
+                   RoutableIPs: []netip.Prefix{
+                       netip.MustParsePrefix("10.33.0.0/16"),
+                   },
+               },
+           },
+           peers: types.Nodes{
+               &types.Node{
+                   IPAddresses: types.NodeAddresses{
+                       netip.MustParseAddr("100.64.0.2"),
+                       netip.MustParseAddr("fd7a:115c:a1e0::2"),
+                   },
+                   User: types.User{Name: "user1"},
+               },
+           },
+           want: []tailcfg.FilterRule{
+               {
+                   SrcIPs: []string{
+                       "100.64.0.1/32",
+                       "100.64.0.2/32",
+                       "fd7a:115c:a1e0::1/128",
+                       "fd7a:115c:a1e0::2/128",
+                   },
+                   DstPorts: []tailcfg.NetPortRange{
+                       {
+                           IP:    "100.64.0.1/32",
+                           Ports: tailcfg.PortRangeAny,
+                       },
+                       {
+                           IP:    "fd7a:115c:a1e0::1/128",
+                           Ports: tailcfg.PortRangeAny,
+                       },
+                   },
+               },
+               {
+                   SrcIPs: []string{
+                       "100.64.0.1/32",
+                       "100.64.0.2/32",
+                       "fd7a:115c:a1e0::1/128",
+                       "fd7a:115c:a1e0::2/128",
+                   },
+                   DstPorts: []tailcfg.NetPortRange{
+                       {
+                           IP:    "10.33.0.0/16",
+                           Ports: tailcfg.PortRangeAny,
+                       },
+                   },
+               },
+           },
+       },
    }

    for _, tt := range tests {

@@ -125,6 +125,14 @@ func (h *Headscale) handlePoll(

            return
        }
+
+       if h.ACLPolicy != nil {
+           // update routes with peer information
+           err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node)
+           if err != nil {
+               logErr(err, "Error running auto approved routes")
+           }
+       }
    }

    // Services is mostly useful for discovery and not critical,

@@ -145,6 +153,8 @@ func (h *Headscale) handlePoll(
            return
        }

+       // Send an update to all peers to propagate the new routes
+       // available.
        stateUpdate := types.StateUpdate{
            Type:        types.StatePeerChanged,
            ChangeNodes: types.Nodes{node},

@@ -156,6 +166,19 @@ func (h *Headscale) handlePoll(
                node.MachineKey.String())
        }

+       // Send an update to the node itself with to ensure it
+       // has an updated packetfilter allowing the new route
+       // if it is defined in the ACL.
+       selfUpdate := types.StateUpdate{
+           Type:        types.StateSelfUpdate,
+           ChangeNodes: types.Nodes{node},
+       }
+       if selfUpdate.Valid() {
+           h.nodeNotifier.NotifyByMachineKey(
+               selfUpdate,
+               node.MachineKey)
+       }
+
        return
    }
 }

@@ -370,6 +393,16 @@ func (h *Headscale) handlePoll(
            var data []byte
            var err error

+           // Ensure the node object is updated, for example, there
+           // might have been a hostinfo update in a sidechannel
+           // which contains data needed to generate a map response.
+           node, err = h.db.GetNodeByMachineKey(node.MachineKey)
+           if err != nil {
+               logErr(err, "Could not get machine from db")
+
+               return
+           }
+
            switch update.Type {
            case types.StateFullUpdate:
                logInfo("Sending Full MapResponse")

@@ -397,6 +430,14 @@ func (h *Headscale) handlePoll(
            case types.StatePeerRemoved:
                logInfo("Sending PeerRemoved MapResponse")
                data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed)
+           case types.StateSelfUpdate:
+               if len(update.ChangeNodes) == 1 {
+                   logInfo("Sending SelfUpdate MapResponse")
+                   node = update.ChangeNodes[0]
+                   data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy)
+               } else {
+                   logInfo("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.")
+               }
            case types.StateDERPUpdated:
                logInfo("Sending DERPUpdate MapResponse")
                data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap)

@@ -13,7 +13,7 @@ import (
 )

 const (
-   MinimumCapVersion tailcfg.CapabilityVersion = 36
+   MinimumCapVersion tailcfg.CapabilityVersion = 56
 )

 // NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol

@@ -91,6 +91,12 @@ const (
    StatePeerChanged
    StatePeerChangedPatch
    StatePeerRemoved
+   // StateSelfUpdate is used to indicate that the node
+   // has changed in control, and the client needs to be
+   // informed.
+   // The updated node is inside the ChangeNodes field
+   // which should have a length of one.
+   StateSelfUpdate
    StateDERPUpdated
 )

@@ -142,6 +148,10 @@ func (su *StateUpdate) Valid() bool {
        if su.Removed == nil {
            panic("Mandatory field Removed is not set on StatePeerRemove update")
        }
+   case StateSelfUpdate:
+       if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 {
+           panic("Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node")
+       }
    case StateDERPUpdated:
        if su.DERPMap == nil {
            panic("Mandatory field DERPMap is not set on StateDERPUpdated update")
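
The StateSelfUpdate flow introduced above follows one pattern everywhere it appears in this change set: build an update that carries exactly one node, check that it is valid, and deliver it only to that node's own channel via the notifier, keyed by machine key. The following is a toy, self-contained sketch of that delivery pattern; the Node, StateUpdate, and Notifier types here are simplified stand-ins, not headscale's real hscontrol/types and hscontrol/notifier implementations.

package main

import "fmt"

// Simplified stand-ins for headscale's update types.
type Node struct{ MachineKey, Hostname string }

type StateUpdate struct {
    Type        string // e.g. "self-update"
    ChangeNodes []*Node
}

// Toy notifier: one channel per connected node, keyed by machine key.
type Notifier struct{ nodes map[string]chan StateUpdate }

// NotifyByMachineKey delivers an update to a single node, if it is connected.
func (n *Notifier) NotifyByMachineKey(u StateUpdate, mKey string) {
    if c, ok := n.nodes[mKey]; ok {
        c <- u
    }
}

func main() {
    node := &Node{MachineKey: "mkey:abc", Hostname: "router-1"}
    ch := make(chan StateUpdate, 1)
    notifier := &Notifier{nodes: map[string]chan StateUpdate{node.MachineKey: ch}}

    // A self-update always carries exactly one node: the recipient itself.
    selfUpdate := StateUpdate{Type: "self-update", ChangeNodes: []*Node{node}}
    if len(selfUpdate.ChangeNodes) != 1 {
        panic("self-update must carry exactly one node")
    }
    notifier.NotifyByMachineKey(selfUpdate, node.MachineKey)

    got := <-ch
    fmt.Printf("%s received %s for %s\n", node.Hostname, got.Type, got.ChangeNodes[0].Hostname)
}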

@@ -107,16 +107,19 @@ type OIDCConfig struct {
 }

 type DERPConfig struct {
    ServerEnabled                      bool
+   AutomaticallyAddEmbeddedDerpRegion bool
    ServerRegionID                     int
    ServerRegionCode                   string
    ServerRegionName                   string
    ServerPrivateKeyPath               string
    STUNAddr                           string
    URLs                               []url.URL
    Paths                              []string
    AutoUpdate                         bool
    UpdateFrequency                    time.Duration
+   IPv4                               string
+   IPv6                               string
 }

 type LogTailConfig struct {

@@ -169,6 +172,7 @@ func LoadConfig(path string, isFile bool) error {

    viper.SetDefault("derp.server.enabled", false)
    viper.SetDefault("derp.server.stun.enabled", true)
+   viper.SetDefault("derp.server.automatically_add_embedded_derp_region", true)

    viper.SetDefault("unix_socket", "/var/run/headscale/headscale.sock")
    viper.SetDefault("unix_socket_permission", "0o770")

@@ -286,8 +290,14 @@ func GetDERPConfig() DERPConfig {
    serverRegionCode := viper.GetString("derp.server.region_code")
    serverRegionName := viper.GetString("derp.server.region_name")
    stunAddr := viper.GetString("derp.server.stun_listen_addr")
-   privateKeyPath := util.AbsolutePathFromConfigPath(viper.GetString("derp.server.private_key_path"))
+   privateKeyPath := util.AbsolutePathFromConfigPath(
+       viper.GetString("derp.server.private_key_path"),
+   )
+   ipv4 := viper.GetString("derp.server.ipv4")
+   ipv6 := viper.GetString("derp.server.ipv6")
+   automaticallyAddEmbeddedDerpRegion := viper.GetBool(
+       "derp.server.automatically_add_embedded_derp_region",
+   )
    if serverEnabled && stunAddr == "" {
        log.Fatal().
            Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")

@@ -310,20 +320,28 @@ func GetDERPConfig() DERPConfig {

    paths := viper.GetStringSlice("derp.paths")

+   if serverEnabled && !automaticallyAddEmbeddedDerpRegion && len(paths) == 0 {
+       log.Fatal().
+           Msg("Disabling derp.server.automatically_add_embedded_derp_region requires to configure the derp server in derp.paths")
+   }
+
    autoUpdate := viper.GetBool("derp.auto_update_enabled")
    updateFrequency := viper.GetDuration("derp.update_frequency")

    return DERPConfig{
        ServerEnabled:        serverEnabled,
        ServerRegionID:       serverRegionID,
        ServerRegionCode:     serverRegionCode,
        ServerRegionName:     serverRegionName,
        ServerPrivateKeyPath: privateKeyPath,
        STUNAddr:             stunAddr,
        URLs:                 urls,
        Paths:                paths,
        AutoUpdate:           autoUpdate,
        UpdateFrequency:      updateFrequency,
+       IPv4:                 ipv4,
+       IPv6:                 ipv6,
+       AutomaticallyAddEmbeddedDerpRegion: automaticallyAddEmbeddedDerpRegion,
    }
 }

@@ -572,7 +590,7 @@ func GetHeadscaleConfig() (*Config, error) {
        if err != nil {
            return nil, err
        }
-       oidcClientSecret = string(secretBytes)
+       oidcClientSecret = strings.TrimSpace(string(secretBytes))
    }

    return &Config{

@@ -383,7 +383,7 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
 // inform peers about smaller changes to the node.
 // When a field is added to this function, remember to also add it to:
 // - node.ApplyPeerChange
-// - logTracePeerChange in poll.go
+// - logTracePeerChange in poll.go.
 func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {
    ret := tailcfg.PeerChange{
        NodeID: tailcfg.NodeID(node.ID),
@@ -8,6 +8,7 @@ import (
    "time"

    v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
+   "github.com/juanfont/headscale/hscontrol/policy"
    "github.com/juanfont/headscale/integration/hsic"
    "github.com/juanfont/headscale/integration/tsic"
    "github.com/stretchr/testify/assert"
@@ -665,6 +666,119 @@ func TestNodeTagCommand(t *testing.T) {
    )
}

+func TestNodeAdvertiseTagNoACLCommand(t *testing.T) {
+   IntegrationSkip(t)
+   t.Parallel()
+
+   scenario, err := NewScenario()
+   assertNoErr(t, err)
+   defer scenario.Shutdown()
+
+   spec := map[string]int{
+       "user1": 1,
+   }
+
+   err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:test"})}, hsic.WithTestName("cliadvtags"))
+   assertNoErr(t, err)
+
+   headscale, err := scenario.Headscale()
+   assertNoErr(t, err)
+
+   // Test list all nodes after added seconds
+   resultMachines := make([]*v1.Node, spec["user1"])
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "nodes",
+           "list",
+           "--tags",
+           "--output", "json",
+       },
+       &resultMachines,
+   )
+   assert.Nil(t, err)
+   found := false
+   for _, node := range resultMachines {
+       if node.GetInvalidTags() != nil {
+           for _, tag := range node.GetInvalidTags() {
+               if tag == "tag:test" {
+                   found = true
+               }
+           }
+       }
+   }
+   assert.Equal(
+       t,
+       true,
+       found,
+       "should not find a node with the tag 'tag:test' in the list of nodes",
+   )
+}
+
+func TestNodeAdvertiseTagWithACLCommand(t *testing.T) {
+   IntegrationSkip(t)
+   t.Parallel()
+
+   scenario, err := NewScenario()
+   assertNoErr(t, err)
+   defer scenario.Shutdown()
+
+   spec := map[string]int{
+       "user1": 1,
+   }
+
+   err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:exists"})}, hsic.WithTestName("cliadvtags"), hsic.WithACLPolicy(
+       &policy.ACLPolicy{
+           ACLs: []policy.ACL{
+               {
+                   Action:       "accept",
+                   Sources:      []string{"*"},
+                   Destinations: []string{"*:*"},
+               },
+           },
+           TagOwners: map[string][]string{
+               "tag:exists": {"user1"},
+           },
+       },
+   ))
+   assertNoErr(t, err)
+
+   headscale, err := scenario.Headscale()
+   assertNoErr(t, err)
+
+   // Test list all nodes after added seconds
+   resultMachines := make([]*v1.Node, spec["user1"])
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "nodes",
+           "list",
+           "--tags",
+           "--output", "json",
+       },
+       &resultMachines,
+   )
+   assert.Nil(t, err)
+   found := false
+   for _, node := range resultMachines {
+       if node.GetValidTags() != nil {
+           for _, tag := range node.GetValidTags() {
+               if tag == "tag:exists" {
+                   found = true
+               }
+           }
+       }
+   }
+   assert.Equal(
+       t,
+       true,
+       found,
+       "should not find a node with the tag 'tag:exists' in the list of nodes",
+   )
+}
+
func TestNodeCommand(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()
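Note: the two new tests above check which of a node's requested tags come back as valid or invalid; conceptually, a tag is only valid when the policy's TagOwners grants it to the node's user. A self-contained, illustrative sketch of that split (not headscale's actual implementation):

package main

import "fmt"

// splitTags sorts requested tags into valid and invalid, depending on whether
// the policy's tagOwners map grants the tag to the node's user.
// Illustrative only; headscale's real logic lives in its policy package.
func splitTags(requested []string, user string, tagOwners map[string][]string) (valid, invalid []string) {
    for _, tag := range requested {
        granted := false
        for _, owner := range tagOwners[tag] {
            if owner == user {
                granted = true
            }
        }
        if granted {
            valid = append(valid, tag)
        } else {
            invalid = append(invalid, tag)
        }
    }
    return valid, invalid
}

func main() {
    tagOwners := map[string][]string{"tag:exists": {"user1"}}

    valid, invalid := splitTags([]string{"tag:exists", "tag:test"}, "user1", tagOwners)
    fmt.Println(valid, invalid) // [tag:exists] [tag:test]
}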
@@ -320,7 +320,6 @@ func TestTaildrop(t *testing.T) {
            if err != nil {
                t.Fatalf("failed to install curl on %s, err: %s", client.Hostname(), err)
            }
-
        }
        curlCommand := []string{"curl", "--unix-socket", "/var/run/tailscale/tailscaled.sock", "http://local-tailscaled.sock/localapi/v0/file-targets"}
        err = retry(10, 1*time.Second, func() error {
@@ -537,7 +536,7 @@ func TestExpireNode(t *testing.T) {
        assertNoErr(t, err)

        // Assert that we have the original count - self
-       assert.Len(t, status.Peers(), len(MustTestVersions)-1)
+       assert.Len(t, status.Peers(), spec["user1"]-1)
    }

    headscale, err := scenario.Headscale()
@@ -560,7 +559,7 @@ func TestExpireNode(t *testing.T) {

    t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())

-   time.Sleep(30 * time.Second)
+   time.Sleep(2 * time.Minute)

    now := time.Now()

@@ -572,21 +571,33 @@ func TestExpireNode(t *testing.T) {
        if client.Hostname() != node.GetName() {
            t.Logf("available peers of %s: %v", client.Hostname(), status.Peers())

-           // In addition to marking nodes expired, we filter them out during the map response
-           // this check ensures that the node is either not present, or that it is expired
-           // if it is in the map response.
+           // Ensures that the node is present, and that it is expired.
            if peerStatus, ok := status.Peer[expiredNodeKey]; ok {
                assertNotNil(t, peerStatus.Expired)
-               assert.Truef(t, peerStatus.KeyExpiry.Before(now), "node %s should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
-               assert.Truef(t, peerStatus.Expired, "node %s should be expired, expired is %v", peerStatus.HostName, peerStatus.Expired)
+               assert.NotNil(t, peerStatus.KeyExpiry)
+
+               t.Logf("node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
+               if peerStatus.KeyExpiry != nil {
+                   assert.Truef(t, peerStatus.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
+               }
+
+               assert.Truef(t, peerStatus.Expired, "node %q should be expired, expired is %v", peerStatus.HostName, peerStatus.Expired)
+
+               _, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()})
+               if !strings.Contains(stderr, "node key has expired") {
+                   t.Errorf("expected to be unable to ping expired host %q from %q", node.GetName(), client.Hostname())
+               }
+           } else {
+               t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey)
+           }
+       } else {
+           if status.Self.KeyExpiry != nil {
+               assert.Truef(t, status.Self.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", status.Self.HostName, now.String(), status.Self.KeyExpiry)
            }

-           // TODO(kradalby): We do not propogate expiry correctly, nodes should be aware
-           // of their status, and this should be sent directly to the node when its
-           // expired. This needs a notifier that goes directly to the node (currently we only do peers)
-           // so fix this in a follow up PR.
-           // } else {
-           //  assert.True(t, status.Self.Expired)
+           // NeedsLogin means that the node has understood that it is no longer
+           // valid.
+           assert.Equal(t, "NeedsLogin", status.BackendState)
        }
    }
}
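Note: the reworked assertions above reduce to a simple predicate over the peer's status fields: the key expiry must be set and in the past, and the Expired flag must be raised, while the expired node itself falls back to the "NeedsLogin" backend state. A self-contained sketch using a simplified stand-in for tailscale's ipnstate.PeerStatus (not the real type):

package main

import (
    "fmt"
    "time"
)

// peerStatus is a simplified stand-in for the fields the test reads from
// ipnstate.PeerStatus; it is not the tailscale type.
type peerStatus struct {
    HostName  string
    KeyExpiry *time.Time
    Expired   bool
}

// isExpired mirrors the checks above: the key expiry must be set, lie in the
// past, and the Expired flag must be raised.
func isExpired(ps peerStatus, now time.Time) bool {
    return ps.Expired && ps.KeyExpiry != nil && ps.KeyExpiry.Before(now)
}

func main() {
    past := time.Now().Add(-1 * time.Hour)
    ps := peerStatus{HostName: "ts-node", KeyExpiry: &past, Expired: true}
    fmt.Println(isExpired(ps, time.Now())) // true
}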
@@ -9,10 +9,15 @@ import (
    "testing"
    "time"

+   "github.com/google/go-cmp/cmp"
    v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
+   "github.com/juanfont/headscale/hscontrol/policy"
+   "github.com/juanfont/headscale/hscontrol/util"
    "github.com/juanfont/headscale/integration/hsic"
    "github.com/juanfont/headscale/integration/tsic"
    "github.com/stretchr/testify/assert"
+   "tailscale.com/types/ipproto"
+   "tailscale.com/wgengine/filter"
)

// This test is both testing the routes command and the propagation of
@@ -778,3 +783,413 @@ func TestHASubnetRouterFailover(t *testing.T) {
        )
    }
}
+
+func TestEnableDisableAutoApprovedRoute(t *testing.T) {
+   IntegrationSkip(t)
+   t.Parallel()
+
+   expectedRoutes := "172.0.0.0/24"
+
+   user := "enable-disable-routing"
+
+   scenario, err := NewScenario()
+   assertNoErrf(t, "failed to create scenario: %s", err)
+   defer scenario.Shutdown()
+
+   spec := map[string]int{
+       user: 1,
+   }
+
+   err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
+       &policy.ACLPolicy{
+           ACLs: []policy.ACL{
+               {
+                   Action:       "accept",
+                   Sources:      []string{"*"},
+                   Destinations: []string{"*:*"},
+               },
+           },
+           TagOwners: map[string][]string{
+               "tag:approve": {user},
+           },
+           AutoApprovers: policy.AutoApprovers{
+               Routes: map[string][]string{
+                   expectedRoutes: {"tag:approve"},
+               },
+           },
+       },
+   ))
+   assertNoErrHeadscaleEnv(t, err)
+
+   allClients, err := scenario.ListTailscaleClients()
+   assertNoErrListClients(t, err)
+
+   err = scenario.WaitForTailscaleSync()
+   assertNoErrSync(t, err)
+
+   headscale, err := scenario.Headscale()
+   assertNoErrGetHeadscale(t, err)
+
+   subRouter1 := allClients[0]
+
+   // Initially advertise route
+   command := []string{
+       "tailscale",
+       "set",
+       "--advertise-routes=" + expectedRoutes,
+   }
+   _, _, err = subRouter1.Execute(command)
+   assertNoErrf(t, "failed to advertise route: %s", err)
+
+   time.Sleep(10 * time.Second)
+
+   var routes []*v1.Route
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "routes",
+           "list",
+           "--output",
+           "json",
+       },
+       &routes,
+   )
+   assertNoErr(t, err)
+   assert.Len(t, routes, 1)
+
+   // All routes should be auto approved and enabled
+   assert.Equal(t, true, routes[0].GetAdvertised())
+   assert.Equal(t, true, routes[0].GetEnabled())
+   assert.Equal(t, true, routes[0].GetIsPrimary())
+
+   // Stop advertising route
+   command = []string{
+       "tailscale",
+       "set",
+       "--advertise-routes=",
+   }
+   _, _, err = subRouter1.Execute(command)
+   assertNoErrf(t, "failed to remove advertised route: %s", err)
+
+   time.Sleep(10 * time.Second)
+
+   var notAdvertisedRoutes []*v1.Route
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "routes",
+           "list",
+           "--output",
+           "json",
+       },
+       &notAdvertisedRoutes,
+   )
+   assertNoErr(t, err)
+   assert.Len(t, notAdvertisedRoutes, 1)
+
+   // Route is no longer advertised
+   assert.Equal(t, false, notAdvertisedRoutes[0].GetAdvertised())
+   assert.Equal(t, false, notAdvertisedRoutes[0].GetEnabled())
+   assert.Equal(t, true, notAdvertisedRoutes[0].GetIsPrimary())
+
+   // Advertise route again
+   command = []string{
+       "tailscale",
+       "set",
+       "--advertise-routes=" + expectedRoutes,
+   }
+   _, _, err = subRouter1.Execute(command)
+   assertNoErrf(t, "failed to advertise route: %s", err)
+
+   time.Sleep(10 * time.Second)
+
+   var reAdvertisedRoutes []*v1.Route
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "routes",
+           "list",
+           "--output",
+           "json",
+       },
+       &reAdvertisedRoutes,
+   )
+   assertNoErr(t, err)
+   assert.Len(t, reAdvertisedRoutes, 1)
+
+   // All routes should be auto approved and enabled
+   assert.Equal(t, true, reAdvertisedRoutes[0].GetAdvertised())
+   assert.Equal(t, true, reAdvertisedRoutes[0].GetEnabled())
+   assert.Equal(t, true, reAdvertisedRoutes[0].GetIsPrimary())
+}
+
+// TestSubnetRouteACL verifies that Subnet routes are distributed
+// as expected when ACLs are activated.
+// It implements the issue from
+// https://github.com/juanfont/headscale/issues/1604
+func TestSubnetRouteACL(t *testing.T) {
+   IntegrationSkip(t)
+   t.Parallel()
+
+   user := "subnet-route-acl"
+
+   scenario, err := NewScenario()
+   assertNoErrf(t, "failed to create scenario: %s", err)
+   defer scenario.Shutdown()
+
+   spec := map[string]int{
+       user: 2,
+   }
+
+   err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
+       &policy.ACLPolicy{
+           Groups: policy.Groups{
+               "group:admins": {user},
+           },
+           ACLs: []policy.ACL{
+               {
+                   Action:       "accept",
+                   Sources:      []string{"group:admins"},
+                   Destinations: []string{"group:admins:*"},
+               },
+               {
+                   Action:       "accept",
+                   Sources:      []string{"group:admins"},
+                   Destinations: []string{"10.33.0.0/16:*"},
+               },
+               // {
+               //     Action:       "accept",
+               //     Sources:      []string{"group:admins"},
+               //     Destinations: []string{"0.0.0.0/0:*"},
+               // },
+           },
+       },
+   ))
+   assertNoErrHeadscaleEnv(t, err)
+
+   allClients, err := scenario.ListTailscaleClients()
+   assertNoErrListClients(t, err)
+
+   err = scenario.WaitForTailscaleSync()
+   assertNoErrSync(t, err)
+
+   headscale, err := scenario.Headscale()
+   assertNoErrGetHeadscale(t, err)
+
+   expectedRoutes := map[string]string{
+       "1": "10.33.0.0/16",
+   }
+
+   // Sort nodes by ID
+   sort.SliceStable(allClients, func(i, j int) bool {
+       statusI, err := allClients[i].Status()
+       if err != nil {
+           return false
+       }
+
+       statusJ, err := allClients[j].Status()
+       if err != nil {
+           return false
+       }
+
+       return statusI.Self.ID < statusJ.Self.ID
+   })
+
+   subRouter1 := allClients[0]
+
+   client := allClients[1]
+
+   // advertise HA route on node 1 and 2
+   // ID 1 will be primary
+   // ID 2 will be secondary
+   for _, client := range allClients {
+       status, err := client.Status()
+       assertNoErr(t, err)
+
+       if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
+           command := []string{
+               "tailscale",
+               "set",
+               "--advertise-routes=" + route,
+           }
+           _, _, err = client.Execute(command)
+           assertNoErrf(t, "failed to advertise route: %s", err)
+       }
+   }
+
+   err = scenario.WaitForTailscaleSync()
+   assertNoErrSync(t, err)
+
+   var routes []*v1.Route
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "routes",
+           "list",
+           "--output",
+           "json",
+       },
+       &routes,
+   )
+
+   assertNoErr(t, err)
+   assert.Len(t, routes, 1)
+
+   for _, route := range routes {
+       assert.Equal(t, true, route.GetAdvertised())
+       assert.Equal(t, false, route.GetEnabled())
+       assert.Equal(t, false, route.GetIsPrimary())
+   }
+
+   // Verify that no routes has been sent to the client,
+   // they are not yet enabled.
+   for _, client := range allClients {
+       status, err := client.Status()
+       assertNoErr(t, err)
+
+       for _, peerKey := range status.Peers() {
+           peerStatus := status.Peer[peerKey]
+
+           assert.Nil(t, peerStatus.PrimaryRoutes)
+       }
+   }
+
+   // Enable all routes
+   for _, route := range routes {
+       _, err = headscale.Execute(
+           []string{
+               "headscale",
+               "routes",
+               "enable",
+               "--route",
+               strconv.Itoa(int(route.GetId())),
+           })
+       assertNoErr(t, err)
+   }
+
+   time.Sleep(5 * time.Second)
+
+   var enablingRoutes []*v1.Route
+   err = executeAndUnmarshal(
+       headscale,
+       []string{
+           "headscale",
+           "routes",
+           "list",
+           "--output",
+           "json",
+       },
+       &enablingRoutes,
+   )
+   assertNoErr(t, err)
+   assert.Len(t, enablingRoutes, 1)
+
+   // Node 1 has active route
+   assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
+   assert.Equal(t, true, enablingRoutes[0].GetEnabled())
+   assert.Equal(t, true, enablingRoutes[0].GetIsPrimary())
+
+   // Verify that the client has routes from the primary machine
+   srs1, _ := subRouter1.Status()
+
+   clientStatus, err := client.Status()
+   assertNoErr(t, err)
+
+   srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]
+
+   assertNotNil(t, srs1PeerStatus.PrimaryRoutes)
+
+   t.Logf("subnet1 has following routes: %v", srs1PeerStatus.PrimaryRoutes.AsSlice())
+   assert.Len(t, srs1PeerStatus.PrimaryRoutes.AsSlice(), 1)
+   assert.Contains(
+       t,
+       srs1PeerStatus.PrimaryRoutes.AsSlice(),
+       netip.MustParsePrefix(expectedRoutes[string(srs1.Self.ID)]),
+   )
+
+   clientNm, err := client.Netmap()
+   assertNoErr(t, err)
+
+   wantClientFilter := []filter.Match{
+       {
+           IPProto: []ipproto.Proto{
+               ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+           },
+           Srcs: []netip.Prefix{
+               netip.MustParsePrefix("100.64.0.1/32"),
+               netip.MustParsePrefix("100.64.0.2/32"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+           },
+           Dsts: []filter.NetPortRange{
+               {
+                   Net:   netip.MustParsePrefix("100.64.0.2/32"),
+                   Ports: filter.PortRange{0, 0xffff},
+               },
+               {
+                   Net:   netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+                   Ports: filter.PortRange{0, 0xffff},
+               },
+           },
+           Caps: []filter.CapMatch{},
+       },
+   }
+
+   if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" {
+       t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff)
+   }
+
+   subnetNm, err := subRouter1.Netmap()
+   assertNoErr(t, err)
+
+   wantSubnetFilter := []filter.Match{
+       {
+           IPProto: []ipproto.Proto{
+               ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+           },
+           Srcs: []netip.Prefix{
+               netip.MustParsePrefix("100.64.0.1/32"),
+               netip.MustParsePrefix("100.64.0.2/32"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+           },
+           Dsts: []filter.NetPortRange{
+               {
+                   Net:   netip.MustParsePrefix("100.64.0.1/32"),
+                   Ports: filter.PortRange{0, 0xffff},
+               },
+               {
+                   Net:   netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+                   Ports: filter.PortRange{0, 0xffff},
+               },
+           },
+           Caps: []filter.CapMatch{},
+       },
+       {
+           IPProto: []ipproto.Proto{
+               ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+           },
+           Srcs: []netip.Prefix{
+               netip.MustParsePrefix("100.64.0.1/32"),
+               netip.MustParsePrefix("100.64.0.2/32"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+               netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+           },
+           Dsts: []filter.NetPortRange{
+               {
+                   Net:   netip.MustParsePrefix("10.33.0.0/16"),
+                   Ports: filter.PortRange{0, 0xffff},
+               },
+           },
+           Caps: []filter.CapMatch{},
+       },
+   }
+
+   if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" {
+       t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
+   }
+}
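Note: go-cmp cannot compare netip.Prefix values on its own (they carry unexported fields), which is what util.PrefixComparer supplies in the filter comparisons above. A sketch of what such a cmp.Option could look like (an assumption about its shape, not headscale's actual helper):

package main

import (
    "fmt"
    "net/netip"

    "github.com/google/go-cmp/cmp"
)

func main() {
    // A cmp.Option that tells go-cmp how to compare netip.Prefix values;
    // headscale's util.PrefixComparer is presumably something along these lines.
    prefixComparer := cmp.Comparer(func(a, b netip.Prefix) bool {
        return a == b
    })

    want := []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}
    got := []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}

    if diff := cmp.Diff(want, got, prefixComparer); diff != "" {
        fmt.Printf("unexpected result (-want +got):\n%s", diff)
    } else {
        fmt.Println("prefixes match")
    }
}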
@@ -13,8 +13,10 @@ run_tests() {

    for ((i = 1; i <= num_tests; i++)); do
        docker network prune -f >/dev/null 2>&1
-       docker rm headscale-test-suite || true
-       docker kill "$(docker ps -q)" || true
+       docker rm headscale-test-suite >/dev/null 2>&1 || true
+       docker kill "$(docker ps -q)" >/dev/null 2>&1 || true
+
+       echo "Run $i"
+
        start=$(date +%s)
        docker run \
@@ -47,19 +47,19 @@ var (
    tailscaleVersions2021 = map[string]bool{
        "head": true,
        "unstable": true,
        "1.56": true, // CapVer: 82
        "1.54": true, // CapVer: 79
        "1.52": true, // CapVer: 79
        "1.50": true, // CapVer: 74
        "1.48": true, // CapVer: 68
        "1.46": true, // CapVer: 65
        "1.44": true, // CapVer: 63
        "1.42": true, // CapVer: 61
        "1.40": true, // CapVer: 61
        "1.38": true, // CapVer: 58
-       "1.36": true, // CapVer: 56
-       "1.34": true, // CapVer: 51
-       "1.32": true, // Oldest supported version, CapVer: 46
+       "1.36": true, // Oldest supported version, CapVer: 56
+       "1.34": false, // CapVer: 51
+       "1.32": false, // CapVer: 46
        "1.30": false,
    }

@@ -142,7 +142,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
    })

    t.Run("create-tailscale", func(t *testing.T) {
-       err := scenario.CreateTailscaleNodesInUser(user, "1.30.2", count)
+       err := scenario.CreateTailscaleNodesInUser(user, "unstable", count)
        if err != nil {
            t.Fatalf("failed to add tailscale nodes: %s", err)
        }
@@ -7,6 +7,7 @@ import (
    "github.com/juanfont/headscale/integration/dockertestutil"
    "github.com/juanfont/headscale/integration/tsic"
    "tailscale.com/ipn/ipnstate"
+   "tailscale.com/types/netmap"
)

// nolint
@@ -26,6 +27,7 @@ type TailscaleClient interface {
    IPs() ([]netip.Addr, error)
    FQDN() (string, error)
    Status() (*ipnstate.Status, error)
+   Netmap() (*netmap.NetworkMap, error)
    WaitForNeedsLogin() error
    WaitForRunning() error
    WaitForPeers(expected int) error
@@ -17,6 +17,7 @@ import (
    "github.com/ory/dockertest/v3"
    "github.com/ory/dockertest/v3/docker"
    "tailscale.com/ipn/ipnstate"
+   "tailscale.com/types/netmap"
)

const (
@@ -519,6 +520,30 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
    return &status, err
}

+// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
+// Only works with Tailscale 1.56.1 and newer.
+func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
+   command := []string{
+       "tailscale",
+       "debug",
+       "netmap",
+   }
+
+   result, stderr, err := t.Execute(command)
+   if err != nil {
+       fmt.Printf("stderr: %s\n", stderr)
+       return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
+   }
+
+   var nm netmap.NetworkMap
+   err = json.Unmarshal([]byte(result), &nm)
+   if err != nil {
+       return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
+   }
+
+   return &nm, err
+}
+
// FQDN returns the FQDN as a string of the Tailscale instance.
func (t *TailscaleInContainer) FQDN() (string, error) {
    if t.fqdn != "" {