Compare commits


38 Commits

Author SHA1 Message Date
Kristoffer Dalby
88af29d5f5 derp
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 16:55:35 +01:00
Kristoffer Dalby
9cedc2942b verbose and test
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 16:34:13 +01:00
Kristoffer Dalby
062b9a5611 test
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:49:02 +01:00
Kristoffer Dalby
887302e8f1 setup ko image builder for goreleaser
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:38:33 +01:00
Kristoffer Dalby
b1b90d165d make dockerfiles testing only note
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:36:41 +01:00
derelm
4ea12f472a Fix failover to disabled route #1706 (#1707)
* fix #1706 - failover should disregard disabled routes during failover

* fix tests for failover; all current tests assume routes to be enabled

* add testcase for #1706 - failover to disabled route
2024-02-03 15:30:15 +01:00
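
The failover fix above boils down to one rule: when a primary subnet route goes down, only routes that are both enabled and online are valid takeover candidates. A minimal sketch of that rule, with hypothetical names rather than headscale's actual types:

```go
// Hypothetical sketch of the #1706 rule; not headscale's actual types or API.
package routes

type route struct {
	prefix  string
	enabled bool
	online  bool
}

// pickFailover returns the first candidate that may take over the prefix:
// the route must be enabled, not merely advertised, and its node online.
func pickFailover(candidates []route) *route {
	for i := range candidates {
		if candidates[i].enabled && candidates[i].online {
			return &candidates[i]
		}
	}
	// No usable route: keep the prefix unserved rather than
	// failing over to a disabled route.
	return nil
}
```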
danielalvsaaker
b4210e2c90 Trim client secret after reading from file (#1697)
Reading from file will include a line break, which results in a mismatching client secret
compared to reading directly from the config.
2024-01-25 09:53:34 +01:00
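
The bug description above is the classic trailing-newline pitfall: a secret read from a file carries a final line break that an inline config value does not. A minimal illustration of the trim (hypothetical helper and path, not headscale's actual code):

```go
// Illustrative only: strip the line break that file reads include,
// so the secret matches a value given directly in the config.
package main

import (
	"fmt"
	"os"
	"strings"
)

func clientSecretFromFile(path string) (string, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	// A file containing "secret\n" must compare equal to "secret".
	return strings.TrimSpace(string(raw)), nil
}

func main() {
	secret, err := clientSecretFromFile("/etc/headscale/oidc_client_secret") // hypothetical path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("loaded %d-byte secret\n", len(secret))
}
```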
dyz
a369d57a17 fix node expire error due to type in gorm model Update (#1692)
Fixes #1674

Signed-off-by: fortitude.zhang <fortitude.zhang@gmail.com>
2024-01-21 17:38:24 +01:00
Kristoffer Dalby
1e22f17f36 node selfupdate and fix subnet router when ACL is enabled (#1673)
Fixes #1604

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-01-18 17:30:25 +01:00
Kristoffer Dalby
65376e2842 ensure re-enabled auto-approve routes work (#1670) 2024-01-18 16:36:47 +01:00
Alexander Halbarth
7e8bf4bfe5 Add Customization Options to DERP Map entry of integrated DERP server (#1565)
Co-authored-by: Alexander Halbarth <alexander.halbarth@alite.at>
Co-authored-by: Bela Lemle <bela.lemle@alite.at>
Co-authored-by: Kristoffer Dalby <kristoffer@dalby.cc>
2024-01-16 16:04:03 +01:00
Kristoffer Dalby
3b103280ef implement selfupdate and pass expiry (#1647) 2024-01-05 10:41:56 +01:00
Kristoffer Dalby
a592ae56b4 fix issue where advertise tags causes hang (#1669)
Fixes #1665

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-01-04 21:26:49 +01:00
Kristoffer Dalby
054b06d45d add 1.54 and 1.56 to integration tests (#1652)
* add 1.54 and 1.56 to integration tests

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* fix bug where we tested random versions, now sorted

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

---------

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-01-02 10:41:40 +01:00
Kristoffer Dalby
55ca078f22 embed (hidden) tailsql for debugging (#1663)
Signed-off-by: Kristoffer Dalby <kristoffer@dalby.cc>
2023-12-20 21:47:48 +01:00
Kristoffer Dalby
6049ec758c add versioned migrations (#1644) 2023-12-10 15:46:14 +01:00
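
Versioned migrations replace run-on-every-start auto-migration with one-shot, ordered schema steps keyed by a stable ID. A generic sketch of the pattern, not the actual #1644 implementation:

```go
// Generic versioned-migration pattern, not the code from #1644: each step
// has a stable ID, runs once, in order, and is recorded as applied.
package db

import "gorm.io/gorm"

type migration struct {
	id    string
	apply func(tx *gorm.DB) error
}

var steps = []migration{
	{
		id: "202312100000", // hypothetical ID
		apply: func(tx *gorm.DB) error {
			// hypothetical schema change for illustration
			return tx.Exec(`ALTER TABLE nodes ADD COLUMN example TEXT`).Error
		},
	},
}
```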
Kristoffer Dalby
ac910fd44c make stale shorter (#1646) 2023-12-10 15:30:30 +01:00
Kristoffer Dalby
9982ae5f09 add breaking entry of derp priv key (#1641) 2023-12-10 15:23:23 +01:00
Kristoffer Dalby
cf8ffea154 turn off grpc communication logging (#1640) 2023-12-10 15:22:59 +01:00
Kristoffer Dalby
790bbe5e8d fix hostinfo db column spelling (#1642) 2023-12-10 15:22:26 +01:00
github-actions[bot]
2c8fc9b061 Update flake.lock (#1632)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2023-12-10 09:50:39 +01:00
github-actions[bot]
b359939812 docs(README): update contributors (#1639)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2023-12-10 08:56:01 +01:00
Kristoffer Dalby
f65f4eca35 ensure online status and route changes are propagated (#1564) 2023-12-09 18:09:24 +01:00
Kristoffer Dalby
0153e26392 upgrade go dependencies (#1628) 2023-11-30 14:41:31 +01:00
Andrei Pechkurov
6c9c55774b Update xsync to v3.0.2 (#1597)
Co-authored-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-11-29 15:47:14 +01:00
github-actions[bot]
2f558bee80 Update flake.lock (#1598)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2023-11-29 15:11:28 +01:00
Azamat H. Hackimov
4c608a4b58 Fix Github Actions docs pipeline (#1622) 2023-11-29 15:11:00 +01:00
JesseBot
f13cf64578 Docs: Update running-headscale-container.md - fix link to example config (#1618) 2023-11-29 15:10:21 +01:00
MichaelKo
85e92db505 Enhance pipeline stability and automatically retry unstable tests (#1566)
* add test retry to action

* add test retry to action
2023-11-27 18:32:52 +01:00
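
The workflow diffs below show the mechanism: each generated integration-test job is wrapped in Wandalen/wretry.action with attempt_limit: 5, so a flaky run is retried instead of failing the pipeline outright. The same idea in plain Go (a generic sketch, not the action's implementation):

```go
// Generic retry-with-attempt-limit sketch mirroring the workflow change;
// not the implementation of Wandalen/wretry.action.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func runWithRetry(attempts int, name string, args ...string) error {
	var err error
	for attempt := 1; attempt <= attempts; attempt++ {
		cmd := exec.Command(name, args...)
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		if err = cmd.Run(); err == nil {
			return nil
		}
		fmt.Fprintf(os.Stderr, "attempt %d/%d failed: %v\n", attempt, attempts, err)
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

func main() {
	if err := runWithRetry(5, "go", "test", "./..."); err != nil {
		os.Exit(1)
	}
}
```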
Kristoffer Dalby
a59aab2081 Remove support for non-noise clients (pre-1.32) (#1611) 2023-11-23 08:31:33 +01:00
Kristoffer Dalby
b918aa03fc move to use tailscfg types over strings/custom types (#1612)
* rename database only fields

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* use correct endpoint type over string list

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* remove HostInfo wrapper

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* wrap errors in database hooks

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

---------

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-11-21 18:20:06 +01:00
Kristoffer Dalby
ed4e19996b Use tailscale key types instead of strings (#1609)
* upgrade tailscale

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* make Node object use actual tailscale key types

This commit changes the Node struct to have both a string field for
storing each key in the database and a dedicated key type for each
kind of key.

The keys are populated and persisted with GORM hooks to ensure the
data is stored in the db.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* use key types throughout the code

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* make sure machinekey is consistently used

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* use machine key in auth url

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* fix web register

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* use key type in notifier

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* fix relogin with webauth

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

---------

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-11-19 22:37:04 +01:00
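
A rough sketch of the pattern the commit message above describes: a typed key alongside a string column, kept in sync by a GORM hook. Field names, tags, and the column name here are assumptions, not headscale's actual schema:

```go
// Rough sketch, not headscale's actual schema: code works with the typed
// key, while a GORM hook serialises it into the string column the DB stores.
package db

import (
	"gorm.io/gorm"
	"tailscale.com/types/key"
)

type Node struct {
	ID uint64 `gorm:"primaryKey"`

	// Typed key used throughout the code; ignored by GORM.
	MachineKey key.MachinePublic `gorm:"-"`

	// String form persisted in the database (hypothetical column name).
	MachineKeyDatabaseField string `gorm:"column:machine_key"`
}

// BeforeSave keeps the string column in sync with the typed key.
func (node *Node) BeforeSave(tx *gorm.DB) error {
	text, err := node.MachineKey.MarshalText()
	if err != nil {
		return err
	}
	node.MachineKeyDatabaseField = string(text)
	return nil
}
```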
Kristoffer Dalby
c0fd06e3f5 remove the use key stripping and store the proper keys (#1603) 2023-11-16 17:55:29 +01:00
github-actions[bot]
2af71c9e31 docs(README): update contributors (#1592)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2023-11-06 14:07:35 +01:00
Lucalux
42b7f8f65a redundant line removed from systemd.service (#1587) 2023-11-06 13:45:34 +01:00
github-actions[bot]
48c7d763d5 Update flake.lock (#1589)
Flake lock file updates:

• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/e12483116b3b51a185a33a272bf351e357ba9a99' (2023-09-21)
  → 'github:NixOS/nixpkgs/a0b3b06b7a82c965ae0bb1d59f6e386fe755001d' (2023-11-05)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2023-11-06 12:57:06 +01:00
Kristoffer Dalby
d0d6438337 Add workflow to autoupdate flake.lock deps (#1588)
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-11-06 12:48:52 +01:00
Kristoffer Dalby
fb4ed95ff6 Upgrade Go 1.21, Tailscale 1.50 and add Capability version support (#1563) 2023-09-28 12:33:53 -07:00
128 changed files with 6439 additions and 3107 deletions


@@ -26,7 +26,7 @@ jobs:
           key: ${{ github.ref }}
           path: .cache
       - name: Setup dependencies
-        run: pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
+        run: pip install -r docs/requirements.txt
       - name: Build docs
         run: mkdocs build --strict
       - name: Upload artifact


@@ -20,6 +20,6 @@ jobs:
       - uses: DeterminateSystems/magic-nix-cache-action@main
       - name: Run goreleaser
-        run: nix develop --command -- goreleaser release --clean
+        run: nix develop --command -- goreleaser release --clean --verbose
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -12,10 +12,10 @@ jobs:
     steps:
       - uses: actions/stale@v5
         with:
-          days-before-issue-stale: 180
-          days-before-issue-close: 14
+          days-before-issue-stale: 90
+          days-before-issue-close: 7
           stale-issue-label: "stale"
-          stale-issue-message: "This issue is stale because it has been open for 180 days with no activity."
+          stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
           close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
          days-before-pr-stale: -1
          days-before-pr-close: -1


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLAllowStarDst
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLAllowUser80Dst
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLAllowUserDst
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLDenyAllPort80
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLDevice1CanAccessDevice2
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLHostsInNetMapTable
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLNamedHostsCanReach
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestACLNamedHostsCanReachBySubnet
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestApiKeyCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestAuthKeyLogoutAndRelogin
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestAuthWebFlowAuthenticationPingAll
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestAuthWebFlowLogoutAndRelogin
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestCreateTailscale
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestDERPServerScenario
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnableDisableAutoApprovedRoute
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestEnableDisableAutoApprovedRoute:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestEnableDisableAutoApprovedRoute
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnableDisableAutoApprovedRoute$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestEnablingRoutes
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestEphemeral
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestExpireNode
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHASubnetRouterFailover
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestHASubnetRouterFailover:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestHASubnetRouterFailover
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHASubnetRouterFailover$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestHeadscale
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagNoACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagNoACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagNoACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagNoACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagWithACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagWithACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagWithACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagWithACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestNodeCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestNodeExpireCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestNodeMoveCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeOnlineLastSeenStatus
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeOnlineLastSeenStatus:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeOnlineLastSeenStatus
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeOnlineLastSeenStatus$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestNodeRenameCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestNodeTagCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestOIDCAuthenticationPingAll
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestOIDCExpireNodesBasedOnTokenExpiry
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestPingAllByHostname
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestPingAllByIP
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestPreAuthKeyCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestPreAuthKeyCommandReusableEphemeral
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestPreAuthKeyCommandWithoutExpiry
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestResolveMagicDNS
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestSSHIsBlockedInACL
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestSSHMultipleUsersAllToAll
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestSSHNoSSHConfigured
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestSSHOneUserToAll
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestSSHUserOnlyIsolation
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSubnetRouteACL
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSubnetRouteACL:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSubnetRouteACL
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSubnetRouteACL$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestTaildrop
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestTailscaleNodesJoiningHeadcale
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \


@@ -35,8 +35,11 @@ jobs:
           config-example.yaml
       - name: Run TestUserCommand
+        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: |
+        with:
+          attempt_limit: 5
+          command: |
           nix develop --command -- docker run \
             --tty --rm \
             --volume ~/.cache/hs-integration-go:/go \
@@ -46,7 +49,6 @@
             --volume $PWD/control_logs:/tmp/control \
             golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \

.github/workflows/update-flake.yml (new file, +18)

@@ -0,0 +1,18 @@
name: update-flake-lock
on:
workflow_dispatch: # allows manual triggering
schedule:
- cron: "0 0 * * 0" # runs weekly on Sunday at 00:00
jobs:
lockfile:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- name: Update flake.lock
uses: DeterminateSystems/update-flake-lock@main
with:
pr-title: "Update flake.lock"

.gitignore (+1)

@@ -1,5 +1,6 @@
 ignored/
 tailscale/
+.vscode/
 
 # Binaries for programs and plugins
 *.exe


@@ -9,20 +9,20 @@ release:
 builds:
   - id: headscale
-    main: ./cmd/headscale/headscale.go
+    main: ./cmd/headscale
     mod_timestamp: "{{ .CommitTimestamp }}"
     env:
       - CGO_ENABLED=0
     targets:
-      - darwin_amd64
-      - darwin_arm64
-      - freebsd_amd64
-      - linux_386
+      # - darwin_amd64
+      # - darwin_arm64
+      # - freebsd_amd64
+      # - linux_386
       - linux_amd64
-      - linux_arm64
-      - linux_arm_5
-      - linux_arm_6
-      - linux_arm_7
+      # - linux_arm64
+      # - linux_arm_5
+      # - linux_arm_6
+      # - linux_arm_7
     flags:
       - -mod=readonly
     ldflags:
@@ -63,7 +63,6 @@ nfpms:
     bindir: /usr/bin
     formats:
       - deb
-      # - rpm
     contents:
       - src: ./config-example.yaml
         dst: /etc/headscale/config.yaml
@@ -80,6 +79,43 @@ nfpms:
     postinstall: ./docs/packaging/postinstall.sh
     postremove: ./docs/packaging/postremove.sh
 
+kos:
+  - id: ghcr
+    build: headscale
+    main: ./cmd/headscale
+    base_image: gcr.io/distroless/base-debian11
+    repository: ghcr.io/juanfont/headscale
+    env:
+      - CGO_ENABLED=0
+    platforms:
+      - linux/amd64
+      # - linux/386
+      # - linux/arm64
+      # - linux/arm/v7
+      # - linux/arm/v6
+      # - linux/arm/v5
+    tags:
+      - latest
+      - '{{.Tag}}'
+      - '{{ .Major }}.{{ .Minor }}'
+      - '{{ .Major }}'
+      - '{{ if not .Prerelease }}stable{{ end }}'
+
+  # - id: dockerhub
+  #   build: headscale
+  #   base_image: gcr.io/distroless/base-debian11
+  #   repository: headscale/headscale
+  #   platforms:
+  #     - linux/amd64
+  #     - linux/386
+  #     - linux/arm64
+  #     - linux/arm/v7
+  #     - linux/arm/v6
+  #     - linux/arm/v5
+  #   tags:
+  #     - latest
+  #     - '{{.Tag}}'
+
 checksum:
   name_template: "checksums.txt"
 snapshot:


@@ -23,17 +23,27 @@ after improving the test harness as part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460)
 
 ### BREAKING
 
-Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
-API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
+- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
+- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
+- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
+  - The latest supported client is 1.36
+- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
+  - If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url.
+- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
+  - Add a filepath entry to [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)
 
 ### Changes
 
+Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)
 Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)
 SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)
 State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492)
 Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on [#1460](https://github.com/juanfont/headscale/pull/1460)
 Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480)
 Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
 Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
+Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
+Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565)
 
 ## 0.22.3 (2023-05-12)


@@ -1,4 +1,7 @@
 # Builder image
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
 FROM docker.io/golang:1.21-bookworm AS build
 ARG VERSION=dev
 ENV GOPATH /go
@@ -9,7 +12,7 @@ RUN go mod download
 COPY . .
 
-RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN strip /go/bin/headscale
 RUN test -e /go/bin/headscale
@@ -17,9 +20,9 @@ RUN test -e /go/bin/headscale
 FROM docker.io/debian:bookworm-slim
 
 RUN apt-get update \
-&& apt-get install -y ca-certificates \
-&& rm -rf /var/lib/apt/lists/* \
-&& apt-get clean
+    && apt-get install -y ca-certificates \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt-get clean
 
 COPY --from=build /go/bin/headscale /bin/headscale
 ENV TZ UTC


@@ -1,5 +1,8 @@
 # Builder image
-FROM docker.io/golang:1.20-bullseye AS build
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
+FROM docker.io/golang:1.21-bookworm AS build
 ARG VERSION=dev
 ENV GOPATH /go
 WORKDIR /go/src/headscale
@@ -9,19 +12,19 @@ RUN go mod download
 COPY . .
 
-RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN test -e /go/bin/headscale
 
 # Debug image
-FROM docker.io/golang:1.20.0-bullseye
+FROM docker.io/golang:1.21-bookworm
 COPY --from=build /go/bin/headscale /bin/headscale
 ENV TZ UTC
 
 RUN apt-get update \
-&& apt-get install --no-install-recommends --yes less jq \
-&& rm -rf /var/lib/apt/lists/* \
-&& apt-get clean
+    && apt-get install --no-install-recommends --yes less jq \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt-get clean
 RUN mkdir -p /var/run/headscale
 
 # Need to reset the entrypoint or everything will run as a busybox script


@@ -1,3 +1,7 @@
+# This Dockerfile and the images produced are for testing headscale,
+# and are in no way endorsed by Headscale's maintainers as an
+# official nor supported release or distribution.
+
 FROM golang:latest
 
 RUN apt-get update \


@@ -10,8 +10,6 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
 else
 endif
 
-TAGS = -tags ts2019
-
 # GO_SOURCES = $(wildcard *.go)
 # PROTO_SOURCES = $(wildcard **/*.proto)
 GO_SOURCES = $(call rwildcard,,*.go)
@@ -24,7 +22,7 @@ build:
 dev: lint test build
 
 test:
-	gotestsum -- $(TAGS) -short -coverprofile=coverage.out ./...
+	gotestsum -- -short -coverprofile=coverage.out ./...
 
 test_integration:
 	docker run \
@@ -34,7 +32,7 @@ test_integration:
 		-v $$PWD:$$PWD -w $$PWD/integration \
 		-v /var/run/docker.sock:/var/run/docker.sock \
 		golang:1 \
-		go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast ./... -timeout 120m -parallel 8
+		go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
 
 lint:
 	golangci-lint run --fix --timeout 10m

README.md (110 changed lines)

@@ -466,6 +466,13 @@ make build
 <sub style="font-size:14px"><b>unreality</b></sub>
 </a>
 </td>
+<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+<a href=https://github.com/vsychov>
+<img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
+<br />
+<sub style="font-size:14px"><b>MichaelKo</b></sub>
+</a>
+</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/kevin1sMe>
 <img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/>
@@ -473,6 +480,8 @@ make build
 <sub style="font-size:14px"><b>kevinlin</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/QZAiXH>
 <img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/>
@@ -480,8 +489,6 @@ make build
 <sub style="font-size:14px"><b>Snack</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/artemklevtsov>
 <img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
@@ -517,6 +524,8 @@ make build
 <sub style="font-size:14px"><b>LIU HANCHENG</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/motiejus>
 <img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/>
@@ -524,8 +533,6 @@ make build
 <sub style="font-size:14px"><b>Motiejus Jakštys</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/pvinis>
 <img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
@@ -547,13 +554,6 @@ make build
 <sub style="font-size:14px"><b>Steven Honson</b></sub>
 </a>
 </td>
-<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-<a href=https://github.com/vsychov>
-<img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
-<br />
-<sub style="font-size:14px"><b>MichaelKo</b></sub>
-</a>
-</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/ratsclub>
 <img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/>
@@ -577,6 +577,13 @@ make build
 <sub style="font-size:14px"><b>thomas</b></sub>
 </a>
 </td>
+<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+<a href=https://github.com/puzpuzpuz>
+<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
+<br />
+<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
+</a>
+</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/linsomniac>
 <img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/>
@@ -598,13 +605,6 @@ make build
 <sub style="font-size:14px"><b>Albert Copeland</b></sub>
 </a>
 </td>
-<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-<a href=https://github.com/puzpuzpuz>
-<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
-<br />
-<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
-</a>
-</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/theryecatcher>
 <img src=https://avatars.githubusercontent.com/u/16442416?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/>
@@ -658,6 +658,13 @@ make build
 </td>
 </tr>
 <tr>
+<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+<a href=https://github.com/winterheart>
+<img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/>
+<br />
+<sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub>
+</a>
+</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/stensonb>
 <img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/>
@@ -693,6 +700,8 @@ make build
 <sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/felixonmars>
 <img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/>
@@ -700,8 +709,6 @@ make build
 <sub style="font-size:14px"><b>Felix Yan</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/gabe565>
 <img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/>
@@ -723,6 +730,13 @@ make build
 <sub style="font-size:14px"><b>hrtkpf</b></sub>
 </a>
 </td>
+<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+<a href=https://github.com/jessebot>
+<img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/>
+<br />
+<sub style="font-size:14px"><b>JesseBot</b></sub>
+</a>
+</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/jimt>
 <img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
@@ -730,6 +744,8 @@ make build
 <sub style="font-size:14px"><b>Jim Tittsler</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/jsiebens>
 <img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/>
@@ -744,8 +760,6 @@ make build
 <sub style="font-size:14px"><b>John Axel Eriksson</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/ShadowJonathan>
 <img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
@@ -767,6 +781,15 @@ make build
 <sub style="font-size:14px"><b>Kurnia D Win</b></sub>
 </a>
 </td>
+<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+<a href=https://github.com/Lucalux>
+<img src=https://avatars.githubusercontent.com/u/70356955?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Lucalux/>
+<br />
+<sub style="font-size:14px"><b>Lucalux</b></sub>
+</a>
+</td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/foxtrot>
 <img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/>
@@ -774,13 +797,6 @@ make build
 <sub style="font-size:14px"><b>Marc</b></sub>
 </a>
 </td>
-<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-<a href=https://github.com/magf>
-<img src=https://avatars.githubusercontent.com/u/11992737?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Maxim Gajdaj/>
-<br />
-<sub style="font-size:14px"><b>Maxim Gajdaj</b></sub>
-</a>
-</td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/mhameed>
 <img src=https://avatars.githubusercontent.com/u/447017?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mesar Hameed/>
@@ -788,8 +804,6 @@ make build
 <sub style="font-size:14px"><b>Mesar Hameed</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/mikejsavage>
 <img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/>
@@ -812,12 +826,14 @@ make build
 </a>
 </td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-<a href=https://github.com/Donran>
+<a href=https://github.com/donran>
 <img src=https://avatars.githubusercontent.com/u/4838348?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pontus N/>
 <br />
 <sub style="font-size:14px"><b>Pontus N</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/nnsee>
 <img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/>
@@ -832,8 +848,6 @@ make build
 <sub style="font-size:14px"><b>rcursaru</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/renovate-bot>
 <img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/>
@@ -850,9 +864,9 @@ make build
 </td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/muzy>
-<img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian Muszytowski/>
+<img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/>
 <br />
-<sub style="font-size:14px"><b>Sebastian Muszytowski</b></sub>
+<sub style="font-size:14px"><b>Sebastian</b></sub>
 </a>
 </td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -862,6 +876,8 @@ make build
 <sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/6ixfalls>
 <img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/>
@@ -876,8 +892,6 @@ make build
 <sub style="font-size:14px"><b>Stefan VanBuren</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/sophware>
 <img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/>
@@ -906,6 +920,8 @@ make build
 <sub style="font-size:14px"><b>The Gitter Badger</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/tianon>
 <img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/>
@@ -920,8 +936,6 @@ make build
 <sub style="font-size:14px"><b>Till Hoffmann</b></sub>
 </a>
 </td>
-</tr>
-<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/woudsma>
 <img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
@@ -937,7 +951,7 @@ make build
 </a>
 </td>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-<a href=https://github.com/sleepymole1>
+<a href=https://github.com/sleepymole>
 <img src=https://avatars.githubusercontent.com/u/17199941?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yujie Xia/>
 <br />
 <sub style="font-size:14px"><b>Yujie Xia</b></sub>
@@ -950,6 +964,8 @@ make build
 <sub style="font-size:14px"><b>Zachary Newell</b></sub>
 </a>
 </td>
+</tr>
+<tr>
 <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
 <a href=https://github.com/zekker6>
 <img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
@@ -964,8 +980,6 @@ make build
<sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Bpazy>
<img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
@@ -994,6 +1008,8 @@ make build
<sub style="font-size:14px"><b>dnaq</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nning>
<img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/>
@@ -1008,8 +1024,6 @@ make build
<sub style="font-size:14px"><b>ignoramous</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jimyag>
<img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/>
@@ -1038,6 +1052,8 @@ make build
<sub style="font-size:14px"><b>ma6174</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/manju-rn>
<img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
@@ -1052,8 +1068,6 @@ make build
<sub style="font-size:14px"><b>nicholas-yap</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/pernila>
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/>
@@ -1082,6 +1096,8 @@ make build
<sub style="font-size:14px"><b>zy</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/atorregrosa-smd>
<img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/>


@@ -1,47 +0,0 @@
package main
import (
"log"
"github.com/juanfont/headscale/integration"
"github.com/juanfont/headscale/integration/tsic"
"github.com/ory/dockertest/v3"
)
func main() {
log.Printf("creating docker pool")
pool, err := dockertest.NewPool("")
if err != nil {
log.Fatalf("could not connect to docker: %s", err)
}
log.Printf("creating docker network")
network, err := pool.CreateNetwork("docker-integration-net")
if err != nil {
log.Fatalf("failed to create or get network: %s", err)
}
for _, version := range integration.AllVersions {
log.Printf("creating container image for Tailscale (%s)", version)
tsClient, err := tsic.New(
pool,
version,
network,
)
if err != nil {
log.Fatalf("failed to create tailscale node: %s", err)
}
err = tsClient.Shutdown()
if err != nil {
log.Fatalf("failed to shut down container: %s", err)
}
}
network.Close()
err = pool.RemoveNetwork(network)
if err != nil {
log.Fatalf("failed to remove network: %s", err)
}
}


@@ -56,8 +56,11 @@ jobs:
config-example.yaml
- name: Run {{.Name}}
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
run: |
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
@@ -67,7 +70,6 @@ jobs:
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \


@@ -67,7 +67,7 @@ var listAPIKeys = &cobra.Command{
}
if output != "" {
SuccessOutput(response.ApiKeys, "", output)
SuccessOutput(response.GetApiKeys(), "", output)
return
}
@@ -75,11 +75,11 @@ var listAPIKeys = &cobra.Command{
tableData := pterm.TableData{
{"ID", "Prefix", "Expiration", "Created"},
}
for _, key := range response.ApiKeys {
for _, key := range response.GetApiKeys() {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.Expiration.AsTime())
expiration = ColourTime(key.GetExpiration().AsTime())
}
tableData = append(tableData, []string{
@@ -155,7 +155,7 @@ If you lose a key, create a new one and revoke (expire) the old one.`,
return
}
SuccessOutput(response.ApiKey, response.ApiKey, output)
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
},
}
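
The change above, repeated across the CLI files below, swaps direct struct field access (`response.ApiKeys`, `key.Expiration`) for the getters that protoc-gen-go emits (`GetApiKeys()`, `GetExpiration()`). Generated getters are nil-safe, so a nil response or an unset field degrades to a zero value instead of a panic. A minimal sketch of the difference, using simplified stand-in types rather than the real generated ones:

```go
package main

import "fmt"

// Simplified stand-ins for protobuf-generated types; the real ones live in
// gen/go/headscale/v1 and are generated by protoc-gen-go.
type ApiKey struct{ Prefix string }

type ListApiKeysResponse struct{ ApiKeys []*ApiKey }

// Generated getters are nil-safe: they may be called on a nil receiver.
func (r *ListApiKeysResponse) GetApiKeys() []*ApiKey {
	if r == nil {
		return nil
	}
	return r.ApiKeys
}

func main() {
	var resp *ListApiKeysResponse // e.g. an RPC path that returned nil

	// Direct field access would panic here:
	// _ = resp.ApiKeys // nil pointer dereference

	// The getter degrades gracefully to an empty list.
	for _, k := range resp.GetApiKeys() {
		fmt.Println(k.Prefix)
	}
	fmt.Println("no panic")
}
```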


@@ -4,10 +4,10 @@ import (
"fmt"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"tailscale.com/types/key"
)
const (
@@ -93,11 +93,13 @@ var createNodeCmd = &cobra.Command{
return
}
if !util.NodePublicKeyRegex.Match([]byte(machineKey)) {
err = errPreAuthKeyMalformed
var mkey key.MachinePublic
err = mkey.UnmarshalText([]byte(machineKey))
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error: %s", err),
fmt.Sprintf("Failed to parse machine key from flag: %s", err),
output,
)
@@ -133,6 +135,6 @@ var createNodeCmd = &cobra.Command{
return
}
SuccessOutput(response.Node, "Node created", output)
SuccessOutput(response.GetNode(), "Node created", output)
},
}
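
Replacing the `NodePublicKeyRegex` match with `key.MachinePublic.UnmarshalText` validates the flag with the same parser the rest of the stack uses, so malformed keys fail with a real parse error. A self-contained sketch of that parsing; the key string here is fabricated for illustration:

```go
package main

import (
	"fmt"
	"log"

	"tailscale.com/types/key"
)

func main() {
	// A machine public key is serialised as "mkey:" followed by 64 hex chars.
	// This value is illustrative only, not a real node's key.
	raw := "mkey:" + "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

	var mkey key.MachinePublic
	if err := mkey.UnmarshalText([]byte(raw)); err != nil {
		log.Fatalf("failed to parse machine key: %s", err)
	}

	fmt.Println("parsed:", mkey.ShortString())
}
```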


@@ -152,8 +152,8 @@ var registerNodeCmd = &cobra.Command{
}
SuccessOutput(
response.Node,
fmt.Sprintf("Node %s registered", response.Node.GivenName), output)
response.GetNode(),
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
},
}
@@ -196,12 +196,12 @@ var listNodesCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.Nodes, "", output)
SuccessOutput(response.GetNodes(), "", output)
return
}
tableData, err := nodesToPtables(user, showTags, response.Nodes)
tableData, err := nodesToPtables(user, showTags, response.GetNodes())
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
@@ -262,7 +262,7 @@ var expireNodeCmd = &cobra.Command{
return
}
SuccessOutput(response.Node, "Node expired", output)
SuccessOutput(response.GetNode(), "Node expired", output)
},
}
@@ -310,7 +310,7 @@ var renameNodeCmd = &cobra.Command{
return
}
SuccessOutput(response.Node, "Node renamed", output)
SuccessOutput(response.GetNode(), "Node renamed", output)
},
}
@@ -364,7 +364,7 @@ var deleteNodeCmd = &cobra.Command{
prompt := &survey.Confirm{
Message: fmt.Sprintf(
"Do you want to remove the node %s?",
getResponse.GetNode().Name,
getResponse.GetNode().GetName(),
),
}
err = survey.AskOne(prompt, &confirm)
@@ -473,7 +473,7 @@ var moveNodeCmd = &cobra.Command{
return
}
SuccessOutput(moveResponse.Node, "Node moved to another user", output)
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
},
}
@@ -493,7 +493,7 @@ func nodesToPtables(
"Ephemeral",
"Last seen",
"Expiration",
"Online",
"Connected",
"Expired",
}
if showTags {
@@ -507,21 +507,21 @@ func nodesToPtables(
for _, node := range nodes {
var ephemeral bool
if node.PreAuthKey != nil && node.PreAuthKey.Ephemeral {
if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
ephemeral = true
}
var lastSeen time.Time
var lastSeenTime string
if node.LastSeen != nil {
lastSeen = node.LastSeen.AsTime()
if node.GetLastSeen() != nil {
lastSeen = node.GetLastSeen().AsTime()
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
}
var expiry time.Time
var expiryTime string
if node.Expiry != nil {
expiry = node.Expiry.AsTime()
if node.GetExpiry() != nil {
expiry = node.GetExpiry().AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05")
} else {
expiryTime = "N/A"
@@ -529,7 +529,7 @@ func nodesToPtables(
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
[]byte(node.GetMachineKey()),
)
if err != nil {
machineKey = key.MachinePublic{}
@@ -537,14 +537,14 @@ func nodesToPtables(
var nodeKey key.NodePublic
err = nodeKey.UnmarshalText(
[]byte(util.NodePublicKeyEnsurePrefix(node.NodeKey)),
[]byte(node.GetNodeKey()),
)
if err != nil {
return nil, err
}
var online string
if node.Online {
if node.GetOnline() {
online = pterm.LightGreen("online")
} else {
online = pterm.LightRed("offline")
@@ -558,36 +558,36 @@ func nodesToPtables(
}
var forcedTags string
for _, tag := range node.ForcedTags {
for _, tag := range node.GetForcedTags() {
forcedTags += "," + tag
}
forcedTags = strings.TrimLeft(forcedTags, ",")
var invalidTags string
for _, tag := range node.InvalidTags {
if !contains(node.ForcedTags, tag) {
for _, tag := range node.GetInvalidTags() {
if !contains(node.GetForcedTags(), tag) {
invalidTags += "," + pterm.LightRed(tag)
}
}
invalidTags = strings.TrimLeft(invalidTags, ",")
var validTags string
for _, tag := range node.ValidTags {
if !contains(node.ForcedTags, tag) {
for _, tag := range node.GetValidTags() {
if !contains(node.GetForcedTags(), tag) {
validTags += "," + pterm.LightGreen(tag)
}
}
validTags = strings.TrimLeft(validTags, ",")
var user string
if currentUser == "" || (currentUser == node.User.Name) {
user = pterm.LightMagenta(node.User.Name)
if currentUser == "" || (currentUser == node.GetUser().GetName()) {
user = pterm.LightMagenta(node.GetUser().GetName())
} else {
// Shared into this user
user = pterm.LightYellow(node.User.Name)
user = pterm.LightYellow(node.GetUser().GetName())
}
var IPV4Address string
var IPV6Address string
for _, addr := range node.IpAddresses {
for _, addr := range node.GetIpAddresses() {
if netip.MustParseAddr(addr).Is4() {
IPV4Address = addr
} else {
@@ -596,8 +596,8 @@ func nodesToPtables(
}
nodeData := []string{
strconv.FormatUint(node.Id, util.Base10),
node.Name,
strconv.FormatUint(node.GetId(), util.Base10),
node.GetName(),
node.GetGivenName(),
machineKey.ShortString(),
nodeKey.ShortString(),
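
`nodesToPtables` assembles a `pterm.TableData`, which is just a slice of string rows with the header row first; the CLI then renders it. A standalone sketch of that render step, assuming the same pterm calls used in this codebase:

```go
package main

import "github.com/pterm/pterm"

func main() {
	// TableData is a slice of rows; the first row acts as the header.
	tableData := pterm.TableData{
		{"ID", "Hostname", "Connected"},
		{"1", "node-a", pterm.LightGreen("online")},
		{"2", "node-b", pterm.LightRed("offline")},
	}

	// Render with a header row, mirroring how the headscale CLI prints nodes.
	if err := pterm.DefaultTable.WithHasHeader().WithData(tableData).Render(); err != nil {
		pterm.Error.Println(err)
	}
}
```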


@@ -84,7 +84,7 @@ var listPreAuthKeys = &cobra.Command{
}
if output != "" {
SuccessOutput(response.PreAuthKeys, "", output)
SuccessOutput(response.GetPreAuthKeys(), "", output)
return
}
@@ -101,10 +101,10 @@ var listPreAuthKeys = &cobra.Command{
"Tags",
},
}
for _, key := range response.PreAuthKeys {
for _, key := range response.GetPreAuthKeys() {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.Expiration.AsTime())
expiration = ColourTime(key.GetExpiration().AsTime())
}
var reusable string
@@ -116,7 +116,7 @@ var listPreAuthKeys = &cobra.Command{
aclTags := ""
for _, tag := range key.AclTags {
for _, tag := range key.GetAclTags() {
aclTags += "," + tag
}
@@ -214,7 +214,7 @@ var createPreAuthKeyCmd = &cobra.Command{
return
}
SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
},
}
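
The `GetExpiration() != nil` guard before `AsTime()` is what lets the table show `-` for keys without an expiry: calling `AsTime()` on a nil `*timestamppb.Timestamp` silently yields the Unix epoch rather than an error. A small sketch of that behaviour:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func describeExpiry(ts *timestamppb.Timestamp) string {
	if ts == nil {
		return "-" // no expiry set
	}
	return ts.AsTime().Format("2006-01-02 15:04:05")
}

func main() {
	fmt.Println(describeExpiry(nil)) // "-"

	exp := timestamppb.New(time.Now().Add(24 * time.Hour))
	fmt.Println(describeExpiry(exp)) // tomorrow's date

	// Without the nil guard, AsTime() on nil is the epoch, not an error:
	var missing *timestamppb.Timestamp
	fmt.Println(missing.AsTime()) // 1970-01-01 00:00:00 +0000 UTC
}
```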


@@ -87,12 +87,12 @@ var listRoutesCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.Routes, "", output)
SuccessOutput(response.GetRoutes(), "", output)
return
}
routes = response.Routes
routes = response.GetRoutes()
} else {
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
NodeId: machineID,
@@ -108,12 +108,12 @@ var listRoutesCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.Routes, "", output)
SuccessOutput(response.GetRoutes(), "", output)
return
}
routes = response.Routes
routes = response.GetRoutes()
}
tableData := routesToPtables(routes)
@@ -271,25 +271,25 @@ func routesToPtables(routes []*v1.Route) pterm.TableData {
for _, route := range routes {
var isPrimaryStr string
prefix, err := netip.ParsePrefix(route.Prefix)
prefix, err := netip.ParsePrefix(route.GetPrefix())
if err != nil {
log.Printf("Error parsing prefix %s: %s", route.Prefix, err)
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
continue
}
if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
isPrimaryStr = "-"
} else {
isPrimaryStr = strconv.FormatBool(route.IsPrimary)
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
}
tableData = append(tableData,
[]string{
strconv.FormatUint(route.Id, Base10),
route.Node.GivenName,
route.Prefix,
strconv.FormatBool(route.Advertised),
strconv.FormatBool(route.Enabled),
strconv.FormatUint(route.GetId(), Base10),
route.GetNode().GetGivenName(),
route.GetPrefix(),
strconv.FormatBool(route.GetAdvertised()),
strconv.FormatBool(route.GetEnabled()),
isPrimaryStr,
})
}
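
`routesToPtables` prints `-` instead of the primary flag for exit routes, which it detects by comparing the parsed prefix against `types.ExitRouteV4` and `types.ExitRouteV6`. A standard-library-only sketch of that check, assuming those constants are `0.0.0.0/0` and `::/0`:

```go
package main

import (
	"fmt"
	"net/netip"
)

// Mirrors types.ExitRouteV4 / types.ExitRouteV6 in hscontrol/types.
var (
	exitRouteV4 = netip.MustParsePrefix("0.0.0.0/0")
	exitRouteV6 = netip.MustParsePrefix("::/0")
)

func isExitRoute(s string) (bool, error) {
	prefix, err := netip.ParsePrefix(s)
	if err != nil {
		return false, fmt.Errorf("parsing prefix %s: %w", s, err)
	}
	return prefix == exitRouteV4 || prefix == exitRouteV6, nil
}

func main() {
	for _, route := range []string{"0.0.0.0/0", "::/0", "10.0.0.0/24"} {
		exit, err := isExitRoute(route)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%-12s exit=%v\n", route, exit)
	}
}
```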


@@ -67,7 +67,7 @@ var createUserCmd = &cobra.Command{
return
}
SuccessOutput(response.User, "User created", output)
SuccessOutput(response.GetUser(), "User created", output)
},
}
@@ -169,7 +169,7 @@ var listUsersCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.Users, "", output)
SuccessOutput(response.GetUsers(), "", output)
return
}
@@ -236,6 +236,6 @@ var renameUserCmd = &cobra.Command{
return
}
SuccessOutput(response.User, "User renamed", output)
SuccessOutput(response.GetUser(), "User renamed", output)
},
}


@@ -40,19 +40,12 @@ grpc_listen_addr: 127.0.0.1:50443
# are doing.
grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/private.key
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. It must be different
# from the legacy private key.
# using the new Noise-based protocol.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
@@ -95,6 +88,22 @@ derp:
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag controls whether the DERP map entry for the embedded DERP server is written automatically;
# setting it to false lets you define your own DERP map entry for it in a locally available file referenced by DERP.paths.
# If you enable the DERP server and set this to false, you must add the DERP server to the DERP map yourself using DERP.paths.
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an exit node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the DERP map using:
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
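
On the server side, `automatically_add_embedded_derp_region` simply gates whether the embedded server's generated region is merged into the DERP map; the same condition shows up in the `hscontrol/app.go` diff further down. A condensed sketch of that gate, with simplified stand-ins for the config and `tailcfg.DERPMap` types:

```go
package main

import "fmt"

// Simplified stand-ins for the real config and tailcfg.DERPMap types.
type DERPConfig struct {
	ServerEnabled                      bool
	AutomaticallyAddEmbeddedDerpRegion bool
}

type DERPRegion struct{ RegionID int }

func buildDERPMap(cfg DERPConfig, external map[int]*DERPRegion, embedded DERPRegion) map[int]*DERPRegion {
	derpMap := make(map[int]*DERPRegion, len(external)+1)
	for id, r := range external {
		derpMap[id] = r
	}
	// Only merge the embedded server's region when the server is enabled
	// AND automatic registration is requested.
	if cfg.ServerEnabled && cfg.AutomaticallyAddEmbeddedDerpRegion {
		derpMap[embedded.RegionID] = &embedded
	}
	return derpMap
}

func main() {
	cfg := DERPConfig{ServerEnabled: true, AutomaticallyAddEmbeddedDerpRegion: false}
	m := buildDERPMap(cfg, map[int]*DERPRegion{1: {RegionID: 1}}, DERPRegion{RegionID: 999})
	fmt.Println("regions:", len(m)) // 1 — the embedded region was left out
}
```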


@@ -26,7 +26,6 @@ ProcSubset=pid
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHome=yes
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true

docs/requirements.txt Normal file

@@ -0,0 +1,5 @@
cairosvg~=2.7.1
mkdocs-material~=9.4.14
mkdocs-minify-plugin~=0.7.1
pillow~=10.1.0


@@ -28,7 +28,7 @@ cd ./headscale
touch ./config/db.sqlite
```
3. **(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
3. **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
Using wget:

flake.lock generated

@@ -5,11 +5,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1692799911,
"narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=",
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1693844670,
"narHash": "sha256-t69F2nBB8DNQUWHD809oJZJVE+23XBrth4QZuVd6IE0=",
"lastModified": 1701998057,
"narHash": "sha256-gAJGhcTO9cso7XDfAScXUlPcva427AUT2q02qrmXPdo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "3c15feef7770eb5500a4b8792623e2d6f598c9c1",
"rev": "09dc04054ba2ff1f861357d0e7e76d021b273cd7",
"type": "github"
},
"original": {


@@ -21,19 +21,17 @@
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
in rec {
headscale = pkgs.buildGo120Module rec {
headscale = pkgs.buildGo121Module rec {
pname = "headscale";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
tags = ["ts2019"];
# Only run unit tests when testing a build
checkFlags = ["-short"];
# When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after making a change to those files.
vendorSha256 = "sha256-dNE5wgR3oWXlYzPNXp0v/GGwY0/hvhOB5JWCb5EIbg8=";
vendorHash = "sha256-8x4RKaS8vnBYTPlvQTkDKWIAJOgPF99hvPiuRyTMrA8=";
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
};
@@ -49,7 +47,7 @@
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
};
vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
vendorHash = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
nativeBuildInputs = [pkgs.installShellFiles];
};
@@ -71,7 +69,7 @@
sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls=";
};
vendorSha256 = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
vendorHash = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
nativeBuildInputs = [pkgs.installShellFiles];
@@ -85,7 +83,7 @@
overlays = [self.overlay];
inherit system;
};
buildDeps = with pkgs; [git go_1_20 gnumake];
buildDeps = with pkgs; [git go_1_21 gnumake];
devDeps = with pkgs;
buildDeps
++ [
@@ -97,6 +95,7 @@
gotestsum
gotests
ksh
ko
# 'dot' is needed for pprof graphs
# go tool pprof -http=: <source>
@@ -129,15 +128,7 @@
buildInputs = devDeps;
shellHook = ''
export GOFLAGS=-tags="ts2019"
export PATH="$PWD/result/bin:$PATH"
mkdir -p ./ignored
export HEADSCALE_PRIVATE_KEY_PATH="./ignored/private.key"
export HEADSCALE_NOISE_PRIVATE_KEY_PATH="./ignored/noise_private.key"
export HEADSCALE_DB_PATH="./ignored/db.sqlite"
export HEADSCALE_TLS_LETSENCRYPT_CACHE_DIR="./ignored/cache"
export HEADSCALE_UNIX_SOCKET="./ignored/headscale.sock"
'';
};

go.mod

@@ -1,152 +1,209 @@
module github.com/juanfont/headscale
go 1.20
go 1.21.0
toolchain go1.21.4
require (
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/cenkalti/backoff/v4 v4.2.0
github.com/coreos/go-oidc/v3 v3.5.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.3.0
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/coreos/go-oidc/v3 v3.8.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/deckarep/golang-set/v2 v2.4.0
github.com/efekarakus/termcolor v1.0.1
github.com/glebarez/sqlite v1.7.0
github.com/glebarez/sqlite v1.10.0
github.com/go-gormigrate/gormigrate/v2 v2.1.1
github.com/gofrs/uuid/v5 v5.0.0
github.com/google/go-cmp v0.5.9
github.com/gorilla/mux v1.8.0
github.com/google/go-cmp v0.6.0
github.com/gorilla/mux v1.8.1
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2
github.com/klauspost/compress v1.16.5
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1
github.com/klauspost/compress v1.17.3
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
github.com/ory/dockertest/v3 v3.9.1
github.com/ory/dockertest/v3 v3.10.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/philip-bui/grpc-zerolog v1.0.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/common v0.42.0
github.com/pterm/pterm v0.12.58
github.com/puzpuzpuz/xsync/v2 v2.4.0
github.com/rs/zerolog v1.29.0
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/common v0.45.0
github.com/pterm/pterm v0.12.71
github.com/puzpuzpuz/xsync/v3 v3.0.2
github.com/rs/zerolog v1.31.0
github.com/samber/lo v1.38.1
github.com/spf13/cobra v1.7.0
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.2
github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.17.0
github.com/stretchr/testify v1.8.4
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20230303233057-f1b76eb4bb35
golang.org/x/crypto v0.8.0
golang.org/x/net v0.10.0
golang.org/x/oauth2 v0.7.0
golang.org/x/sync v0.2.0
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd
google.golang.org/grpc v1.54.0
google.golang.org/protobuf v1.30.0
go4.org/netipx v0.0.0-20230824141953-6213f710f925
golang.org/x/crypto v0.16.0
golang.org/x/exp v0.0.0-20231127185646-65229373498e
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.15.0
golang.org/x/sync v0.5.0
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.31.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.4.8
gorm.io/gorm v1.24.6
tailscale.com v1.44.0
gorm.io/driver/postgres v1.5.4
gorm.io/gorm v1.25.5
tailscale.com v1.56.1
)
require (
atomicgo.dev/cursor v0.1.1 // indirect
atomicgo.dev/cursor v0.2.0 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect
atomicgo.dev/schedule v0.1.0 // indirect
filippo.io/edwards25519 v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.42 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.40 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.38.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect
github.com/aws/smithy-go v1.14.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/docker/cli v23.0.5+incompatible // indirect
github.com/docker/docker v24.0.4+incompatible // indirect
github.com/containerd/continuity v0.4.3 // indirect
github.com/coreos/go-iptables v0.7.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/dblohm7/wingoes v0.0.0-20231025182615-65d8b4b5428f // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
github.com/docker/cli v24.0.7+incompatible // indirect
github.com/docker/docker v24.0.7+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/glebarez/go-sqlite v1.20.3 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.5.0 // indirect
github.com/glebarez/go-sqlite v1.21.2 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect
github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect
github.com/google/pprof v0.0.0-20231127191134-f3a68a39ae15 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.3 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/gorilla/csrf v1.7.1 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/illarion/gonotify v1.0.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/insomniacslk/dhcp v0.0.0-20230908212754-65c27093e38a // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackc/pgx/v5 v5.5.0 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.3.2 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/lithammer/fuzzysearch v1.1.5 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/sdnotify v1.0.0 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/dns v1.1.57 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.7 // indirect
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/opencontainers/runc v1.1.10 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/golang-x-crypto v0.0.0-20230713185742-f0b76a10a08e // indirect
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect
github.com/tailscale/setec v0.0.0-20230926024544-07dde05889e7 // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20231213172531-a4fa669015b2 // indirect
github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect
github.com/tcnksm/go-httpstat v0.2.0 // indirect
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sys v0.8.1-0.20230609144347-5059a07aa46a // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.1 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.16.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gotest.tools/v3 v3.4.0 // indirect
modernc.org/libc v1.22.2 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.5.0 // indirect
modernc.org/sqlite v1.20.3 // indirect
nhooyr.io/websocket v1.8.7 // indirect
gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect
inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect
modernc.org/libc v1.34.11 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.7.2 // indirect
modernc.org/sqlite v1.28.0 // indirect
nhooyr.io/websocket v1.8.10 // indirect
)

go.sum

File diff suppressed because it is too large


@@ -48,6 +48,7 @@ import (
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
@@ -59,6 +60,9 @@ var (
errUnsupportedLetsEncryptChallengeType = errors.New(
"unknown value for Lets Encrypt challenge type",
)
errEmptyInitialDERPMap = errors.New(
"initial DERPMap is empty, Headscale requries at least one entry",
)
)
const (
@@ -77,7 +81,6 @@ type Headscale struct {
dbString string
dbType string
dbDebug bool
privateKey2019 *key.MachinePrivate
noisePrivateKey *key.MachinePrivate
DERPMap *tailcfg.DERPMap
@@ -96,26 +99,23 @@ type Headscale struct {
pollNetMapStreamWG sync.WaitGroup
}
var (
profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED")
tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
tailsqlTSKey = envknob.String("TS_AUTHKEY")
)
func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
if profilingEnabled {
runtime.SetBlockProfileRate(1)
}
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create private key: %w", err)
}
// TS2021 requires to have a different key from the legacy protocol.
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
}
if privateKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
}
var dbString string
switch cfg.DBtype {
case db.Postgres:
@@ -156,7 +156,6 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
cfg: cfg,
dbType: cfg.DBtype,
dbString: dbString,
privateKey2019: privateKey,
noisePrivateKey: noisePrivateKey,
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
@@ -199,10 +198,21 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
}
if cfg.DERP.ServerEnabled {
// TODO(kradalby): replace this key with a dedicated DERP key.
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
}
if derpServerKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf(
"DERP server private key and noise private key are the same: %w",
err,
)
}
embeddedDERPServer, err := derpServer.NewDERPServer(
cfg.ServerURL,
key.NodePrivate(*privateKey),
key.NodePrivate(*derpServerKey),
&cfg.DERP,
)
if err != nil {
@@ -258,25 +268,18 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
case <-ticker.C:
log.Info().Msg("Fetching DERPMap updates")
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled {
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
region, _ := h.DERPServer.GenerateRegion()
h.DERPMap.Regions[region.RegionID] = &region
}
h.nodeNotifier.NotifyAll(types.StateUpdate{
stateUpdate := types.StateUpdate{
Type: types.StateDERPUpdated,
DERPMap: *h.DERPMap,
})
}
}
}
func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
err := h.db.HandlePrimarySubnetFailover()
if err != nil {
log.Error().Err(err).Msg("failed to handle primary subnet failover")
DERPMap: h.DERPMap,
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyAll(stateUpdate)
}
}
}
}
@@ -449,10 +452,9 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
h.addLegacyHandlers(router)
router.HandleFunc("/register/{mkey}", h.RegisterWebAPI).Methods(http.MethodGet)
router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/register/{mkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
@@ -499,7 +501,9 @@ func (h *Headscale) Serve() error {
return err
}
h.DERPMap.Regions[region.RegionID] = &region
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
h.DERPMap.Regions[region.RegionID] = &region
}
go h.DERPServer.ServeSTUN()
}
@@ -510,13 +514,15 @@ func (h *Headscale) Serve() error {
go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
}
if len(h.DERPMap.Regions) == 0 {
return errEmptyInitialDERPMap
}
// TODO(kradalby): These should have cancel channels and be cleaned
// up on shutdown.
go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval)
go h.failoverSubnetRoutes(updateInterval)
if zl.GlobalLevel() == zl.TraceLevel {
zerolog.RespLog = true
} else {
@@ -572,7 +578,10 @@ func (h *Headscale) Serve() error {
}
// Start the local gRPC server without TLS and without authentication
grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())
grpcSocket := grpc.NewServer(
// Uncomment to debug grpc communication.
// zerolog.UnaryInterceptor(),
)
v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
reflection.Register(grpcSocket)
@@ -612,7 +621,8 @@ func (h *Headscale) Serve() error {
grpc.UnaryInterceptor(
grpcMiddleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor,
zerolog.NewUnaryServerInterceptor(),
// Uncomment to debug grpc communication.
// zerolog.NewUnaryServerInterceptor(),
),
),
}
@@ -698,6 +708,18 @@ func (h *Headscale) Serve() error {
log.Info().
Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)
var tailsqlContext context.Context
if tailsqlEnabled {
if h.cfg.DBtype != db.Sqlite {
log.Fatal().Str("type", h.cfg.DBtype).Msgf("tailsql only supports %q", db.Sqlite)
}
if tailsqlTSKey == "" {
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
}
tailsqlContext = context.Background()
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.DBpath)
}
// Handle common process-killing signals so we can gracefully shut down:
h.shutdownChan = make(chan struct{})
sigc := make(chan os.Signal, 1)
@@ -763,6 +785,10 @@ func (h *Headscale) Serve() error {
grpcListener.Close()
}
if tailsqlContext != nil {
tailsqlContext.Done()
}
// Close network listeners
promHTTPListener.Close()
httpListener.Close()
@@ -900,7 +926,8 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
if err != nil {
return nil, fmt.Errorf(
"failed to save private key to disk: %w",
"failed to save private key to disk at path %q: %w",
path,
err,
)
}
@@ -911,16 +938,9 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
}
trimmedPrivateKey := strings.TrimSpace(string(privateKey))
privateKeyEnsurePrefix := util.PrivateKeyEnsurePrefix(trimmedPrivateKey)
var machineKey key.MachinePrivate
if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
log.Info().
Str("path", path).
Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
"If the key is in WireGuard format, delete the key and restart headscale. " +
"A new key will automatically be generated. All Tailscale clients will have to be restarted")
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
return nil, fmt.Errorf("failed to parse private key: %w", err)
}
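
With the legacy-key fallback removed, `readOrCreatePrivateKey` now expects the file to parse directly as a machine private key, and the write error includes the offending path. A standalone sketch of the read-or-create pattern under those assumptions (the path and file mode here are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"tailscale.com/types/key"
)

func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
	raw, err := os.ReadFile(path)
	if os.IsNotExist(err) {
		// No key on disk yet: generate one and persist it.
		machineKey := key.NewMachine()
		text, err := machineKey.MarshalText()
		if err != nil {
			return nil, err
		}
		if err := os.WriteFile(path, text, 0o600); err != nil {
			return nil, fmt.Errorf("failed to save private key to disk at path %q: %w", path, err)
		}
		return &machineKey, nil
	} else if err != nil {
		return nil, err
	}

	// The trimmed file contents must parse directly as a private key.
	var machineKey key.MachinePrivate
	if err := machineKey.UnmarshalText([]byte(strings.TrimSpace(string(raw)))); err != nil {
		return nil, fmt.Errorf("failed to parse private key: %w", err)
	}
	return &machineKey, nil
}

func main() {
	k, err := readOrCreatePrivateKey("./machine.key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded key:", k.Public().ShortString())
}
```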


@@ -1,13 +1,13 @@
package hscontrol
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
@@ -16,22 +16,62 @@ import (
"tailscale.com/types/key"
)
// handleRegister is the common logic for registering a client in the legacy and Noise protocols
//
// When using Noise, the machineKey is Zero.
func logAuthFunc(
registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (func(string), func(string), func(error, string)) {
return func(msg string) {
log.Info().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Msg(msg)
},
func(msg string) {
log.Trace().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Msg(msg)
},
func(err error, msg string) {
log.Error().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Err(err).
Msg(msg)
}
}
// handleRegister is the logic for registering a client.
func (h *Headscale) handleRegister(
writer http.ResponseWriter,
req *http.Request,
registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic,
isNoise bool,
) {
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
now := time.Now().UTC()
logTrace("handleRegister called, looking up machine in DB")
node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
logTrace("handleRegister database lookup has returned")
if errors.Is(err, gorm.ErrRecordNotFound) {
// If the node has AuthKey set, handle registration via PreAuthKeys
if registerRequest.Auth.AuthKey != "" {
h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
h.handleAuthKey(writer, registerRequest, machineKey)
return
}
@@ -45,49 +85,29 @@ func (h *Headscale) handleRegister(
// is that the client will hammer headscale with requests until it gets a
// successful RegisterResponse.
if registerRequest.Followup != "" {
if _, ok := h.registrationCache.Get(util.NodePublicKeyStripPrefix(registerRequest.NodeKey)); ok {
log.Debug().
Caller().
Str("node", registerRequest.Hostinfo.Hostname).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("follow_up", registerRequest.Followup).
Bool("noise", isNoise).
Msg("Node is waiting for interactive login")
logTrace("register request is a followup")
if _, ok := h.registrationCache.Get(machineKey.String()); ok {
logTrace("Node is waiting for interactive login")
select {
case <-req.Context().Done():
return
case <-time.After(registrationHoldoff):
h.handleNewNode(writer, registerRequest, machineKey, isNoise)
h.handleNewNode(writer, registerRequest, machineKey)
return
}
}
}
log.Info().
Caller().
Str("node", registerRequest.Hostinfo.Hostname).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("follow_up", registerRequest.Followup).
Bool("noise", isNoise).
Msg("New node not yet in the database")
logInfo("Node not found in database, creating new")
givenName, err := h.db.GenerateGivenName(
machineKey.String(),
machineKey,
registerRequest.Hostinfo.Hostname,
)
if err != nil {
log.Error().
Caller().
Str("func", "RegistrationHandler").
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
Err(err).
Msg("Failed to generate given name for node")
logErr(err, "Failed to generate given name for node")
return
}
@@ -97,31 +117,26 @@ func (h *Headscale) handleRegister(
// We create the node and then keep it around until a callback
// happens
newNode := types.Node{
MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
MachineKey: machineKey,
Hostname: registerRequest.Hostinfo.Hostname,
GivenName: givenName,
NodeKey: util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
NodeKey: registerRequest.NodeKey,
LastSeen: &now,
Expiry: &time.Time{},
}
if !registerRequest.Expiry.IsZero() {
log.Trace().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Time("expiry", registerRequest.Expiry).
Msg("Non-zero expiry time requested")
logTrace("Non-zero expiry time requested")
newNode.Expiry = &registerRequest.Expiry
}
h.registrationCache.Set(
newNode.NodeKey,
machineKey.String(),
newNode,
registerCacheExpiration,
)
h.handleNewNode(writer, registerRequest, machineKey, isNoise)
h.handleNewNode(writer, registerRequest, machineKey)
return
}
@@ -134,11 +149,7 @@ func (h *Headscale) handleRegister(
// (juan): For a while we had a bug where we were not storing the MachineKey for nodes using TS2021,
// due to a misunderstanding of the protocol https://github.com/juanfont/headscale/issues/1054
// So if we have an invalid MachineKey (but we were able to fetch the node with the NodeKeys), we update it.
var storedMachineKey key.MachinePublic
err = storedMachineKey.UnmarshalText(
[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
)
if err != nil || storedMachineKey.IsZero() {
if err != nil || node.MachineKey.IsZero() {
if err := h.db.NodeSetMachineKey(node, machineKey); err != nil {
log.Error().
Caller().
@@ -156,12 +167,12 @@ func (h *Headscale) handleRegister(
// - Trying to log out (sending a expiry in the past)
// - A valid, registered node, looking for /map
// - Expired node wanting to reauthenticate
if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.NodeKey) {
if node.NodeKey.String() == registerRequest.NodeKey.String() {
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
if !registerRequest.Expiry.IsZero() &&
registerRequest.Expiry.UTC().Before(now) {
h.handleNodeLogOut(writer, *node, machineKey, isNoise)
h.handleNodeLogOut(writer, *node, machineKey)
return
}
@@ -169,21 +180,20 @@ func (h *Headscale) handleRegister(
// If the node is not expired and it is registered, we have already accepted this node,
// let it proceed with a valid registration
if !node.IsExpired() {
h.handleNodeWithValidRegistration(writer, *node, machineKey, isNoise)
h.handleNodeWithValidRegistration(writer, *node, machineKey)
return
}
}
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.OldNodeKey) &&
if node.NodeKey.String() == registerRequest.OldNodeKey.String() &&
!node.IsExpired() {
h.handleNodeKeyRefresh(
writer,
registerRequest,
*node,
machineKey,
isNoise,
)
return
@@ -198,7 +208,7 @@ func (h *Headscale) handleRegister(
}
// The node has expired or it is logged out
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey, isNoise)
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey)
// TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use
node.Expiry = &time.Time{}
@@ -207,9 +217,9 @@ func (h *Headscale) handleRegister(
// we need to make sure the NodeKey matches the one in the request
// TODO(juan): What happens when using fast user switching between two
// headscale-managed tailnets?
node.NodeKey = util.NodePublicKeyStripPrefix(registerRequest.NodeKey)
node.NodeKey = registerRequest.NodeKey
h.registrationCache.Set(
util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
machineKey.String(),
*node,
registerCacheExpiration,
)
@@ -219,7 +229,6 @@ func (h *Headscale) handleRegister(
}
// handleAuthKey contains the logic to manage auth key client registration
// It is used both by the legacy and the new Noise protocol.
// When using Noise, the machineKey is Zero.
//
// TODO: check if any locks are needed around IP allocation.
@@ -227,12 +236,10 @@ func (h *Headscale) handleAuthKey(
writer http.ResponseWriter,
registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic,
isNoise bool,
) {
log.Debug().
Caller().
Str("node", registerRequest.Hostinfo.Hostname).
Bool("noise", isNoise).
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
resp := tailcfg.RegisterResponse{}
@@ -240,17 +247,15 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Err(err).
Msg("Failed authentication via AuthKey")
resp.MachineAuthorized = false
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Err(err).
Msg("Cannot encode message")
@@ -267,14 +272,12 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to write response")
}
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("Failed authentication via AuthKey")
@@ -290,11 +293,10 @@ func (h *Headscale) handleAuthKey(
log.Debug().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("Authentication key was valid, proceeding to acquire IP addresses")
nodeKey := util.NodePublicKeyStripPrefix(registerRequest.NodeKey)
nodeKey := registerRequest.NodeKey
// retrieve node information if it exists
// The error is not important, because if it does not
@@ -304,7 +306,6 @@ func (h *Headscale) handleAuthKey(
if node != nil {
log.Trace().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("node was already registered before, refreshing with new auth key")
@@ -314,7 +315,6 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Err(err).
Msg("Failed to refresh node")
@@ -322,7 +322,7 @@ func (h *Headscale) handleAuthKey(
return
}
aclTags := pak.Proto().AclTags
aclTags := pak.Proto().GetAclTags()
if len(aclTags) > 0 {
// This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login
err = h.db.SetTags(node, aclTags)
@@ -330,7 +330,6 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Strs("aclTags", aclTags).
Err(err).
@@ -342,11 +341,10 @@ func (h *Headscale) handleAuthKey(
} else {
now := time.Now().UTC()
givenName, err := h.db.GenerateGivenName(util.MachinePublicKeyStripPrefix(machineKey), registerRequest.Hostinfo.Hostname)
givenName, err := h.db.GenerateGivenName(machineKey, registerRequest.Hostinfo.Hostname)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("func", "RegistrationHandler").
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
Err(err).
@@ -359,13 +357,13 @@ func (h *Headscale) handleAuthKey(
Hostname: registerRequest.Hostinfo.Hostname,
GivenName: givenName,
UserID: pak.User.ID,
MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
MachineKey: machineKey,
RegisterMethod: util.RegisterMethodAuthKey,
Expiry: &registerRequest.Expiry,
NodeKey: nodeKey,
LastSeen: &now,
AuthKeyID: uint(pak.ID),
ForcedTags: pak.Proto().AclTags,
ForcedTags: pak.Proto().GetAclTags(),
}
node, err = h.db.RegisterNode(
@@ -374,7 +372,6 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("could not register node")
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@@ -389,7 +386,6 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to use pre-auth key")
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@@ -405,11 +401,10 @@ func (h *Headscale) handleAuthKey(
// Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName*
resp.Login = *pak.User.TailscaleLogin()
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Err(err).
Msg("Cannot encode message")
@@ -427,54 +422,46 @@ func (h *Headscale) handleAuthKey(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to write response")
}
log.Info().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")).
Msg("Successfully authenticated via AuthKey")
}
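Reviewer note: every `mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)` call above becomes a plain `json.Marshal(resp)`, since the ts2019 NaCl framing is removed and responses travel over the already-encrypted Noise channel. A minimal sketch of that shared pattern, assuming only the standard library, zerolog, and tailscale.com/tailcfg:

package sketch

import (
	"encoding/json"
	"net/http"

	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
)

// writeRegisterResponse captures the marshal-and-write pattern that the
// handlers above now share: encode as plain JSON, set the header, write.
func writeRegisterResponse(writer http.ResponseWriter, resp tailcfg.RegisterResponse) {
	respBody, err := json.Marshal(resp)
	if err != nil {
		log.Error().Caller().Err(err).Msg("Cannot encode message")
		http.Error(writer, "Internal server error", http.StatusInternalServerError)

		return
	}

	writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	writer.WriteHeader(http.StatusOK)

	if _, err := writer.Write(respBody); err != nil {
		log.Error().Caller().Err(err).Msg("Failed to write response")
	}
}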
// handleNewNode exposes for both legacy and Noise the functionality to get a URL
// for authorizing the node. This url is then showed to the user by the local Tailscale client.
// handleNewNode returns the authorisation URL to the client based on what type
// of registration headscale is configured with.
// This URL is then shown to the user by the local Tailscale client.
func (h *Headscale) handleNewNode(
writer http.ResponseWriter,
registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic,
isNoise bool,
) {
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
resp := tailcfg.RegisterResponse{}
// The node registration is new, redirect the client to the registration URL
log.Debug().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("The node seems to be new, sending auth url")
logTrace("The node seems to be new, sending auth url")
if h.oauth2Config != nil {
resp.AuthURL = fmt.Sprintf(
"%s/oidc/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"),
registerRequest.NodeKey,
machineKey.String(),
)
} else {
resp.AuthURL = fmt.Sprintf("%s/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"),
registerRequest.NodeKey)
machineKey.String())
}
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
logErr(err, "Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
return
@@ -484,31 +471,20 @@ func (h *Headscale) handleNewNode(
writer.WriteHeader(http.StatusOK)
_, err = writer.Write(respBody)
if err != nil {
log.Error().
Bool("noise", isNoise).
Caller().
Err(err).
Msg("Failed to write response")
logErr(err, "Failed to write response")
}
log.Info().
Caller().
Bool("noise", isNoise).
Str("AuthURL", resp.AuthURL).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("Successfully sent auth url")
logInfo(fmt.Sprintf("Successfully sent auth url: %s", resp.AuthURL))
}
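Reviewer note: handleNewNode now logs through the logInfo/logTrace/logErr closures returned by logAuthFunc, whose definition is not part of this hunk. A plausible reconstruction, not the actual headscale implementation, assuming (as the handlers above do) that registerRequest.Hostinfo is non-nil:

package sketch

import (
	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

// logAuthFunc is a hypothetical sketch of the helper: it pre-binds the
// machine key and node name so every call site logs consistent fields.
func logAuthFunc(
	req tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
) (func(string), func(string), func(error, string)) {
	logInfo := func(msg string) {
		log.Info().
			Caller().
			Str("machine_key", machineKey.ShortString()).
			Str("node", req.Hostinfo.Hostname).
			Msg(msg)
	}
	logTrace := func(msg string) {
		log.Trace().
			Caller().
			Str("machine_key", machineKey.ShortString()).
			Str("node", req.Hostinfo.Hostname).
			Msg(msg)
	}
	logErr := func(err error, msg string) {
		log.Error().
			Caller().
			Str("machine_key", machineKey.ShortString()).
			Str("node", req.Hostinfo.Hostname).
			Err(err).
			Msg(msg)
	}

	return logInfo, logTrace, logErr
}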
func (h *Headscale) handleNodeLogOut(
writer http.ResponseWriter,
node types.Node,
machineKey key.MachinePublic,
isNoise bool,
) {
resp := tailcfg.RegisterResponse{}
log.Info().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("Client requested logout")
@@ -517,7 +493,6 @@ func (h *Headscale) handleNodeLogOut(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to expire node")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -525,15 +500,27 @@ func (h *Headscale) handleNodeLogOut(
return
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &now,
},
},
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
resp.AuthURL = ""
resp.MachineAuthorized = false
resp.NodeKeyExpired = true
resp.User = *node.User.TailscaleUser()
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -546,7 +533,6 @@ func (h *Headscale) handleNodeLogOut(
_, err = writer.Write(respBody)
if err != nil {
log.Error().
Bool("noise", isNoise).
Caller().
Err(err).
Msg("Failed to write response")
@@ -568,7 +554,6 @@ func (h *Headscale) handleNodeLogOut(
log.Info().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("Successfully logged out")
}
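Reviewer note: the logout path builds a very specific response shape so the client knows re-authentication is required. A reduced sketch of exactly the fields handleNodeLogOut sets above:

package sketch

import "tailscale.com/tailcfg"

// logoutResponse mirrors the response handleNodeLogOut sends: no auth URL,
// machine no longer authorized, and the node key flagged as expired.
func logoutResponse(user tailcfg.User) tailcfg.RegisterResponse {
	return tailcfg.RegisterResponse{
		AuthURL:           "",
		MachineAuthorized: false,
		NodeKeyExpired:    true,
		User:              user,
	}
}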
@@ -577,14 +562,12 @@ func (h *Headscale) handleNodeWithValidRegistration(
writer http.ResponseWriter,
node types.Node,
machineKey key.MachinePublic,
isNoise bool,
) {
resp := tailcfg.RegisterResponse{}
// The node registration is valid, respond with redirect to /map
log.Debug().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("Client is registered and we have the current NodeKey. All clear to /map")
@@ -593,11 +576,10 @@ func (h *Headscale) handleNodeWithValidRegistration(
resp.User = *node.User.TailscaleUser()
resp.Login = *node.User.TailscaleLogin()
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name).
@@ -615,14 +597,12 @@ func (h *Headscale) handleNodeWithValidRegistration(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to write response")
}
log.Info().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("Node successfully authorized")
}
@@ -632,13 +612,11 @@ func (h *Headscale) handleNodeKeyRefresh(
registerRequest tailcfg.RegisterRequest,
node types.Node,
machineKey key.MachinePublic,
isNoise bool,
) {
resp := tailcfg.RegisterResponse{}
log.Info().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("We have the OldNodeKey in the database. This is a key refresh")
@@ -655,11 +633,10 @@ func (h *Headscale) handleNodeKeyRefresh(
resp.AuthURL = ""
resp.User = *node.User.TailscaleUser()
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -673,14 +650,12 @@ func (h *Headscale) handleNodeKeyRefresh(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to write response")
}
log.Info().
Caller().
Bool("noise", isNoise).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("old_node_key", registerRequest.OldNodeKey.ShortString()).
Str("node", node.Hostname).
@@ -692,12 +667,11 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
registerRequest tailcfg.RegisterRequest,
node types.Node,
machineKey key.MachinePublic,
isNoise bool,
) {
resp := tailcfg.RegisterResponse{}
if registerRequest.Auth.AuthKey != "" {
h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
h.handleAuthKey(writer, registerRequest, machineKey)
return
}
@@ -705,7 +679,6 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
// The client has registered before, but has expired or logged out
log.Trace().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
@@ -715,18 +688,17 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
if h.oauth2Config != nil {
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"),
registerRequest.NodeKey)
machineKey.String())
} else {
resp.AuthURL = fmt.Sprintf("%s/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"),
registerRequest.NodeKey)
machineKey.String())
}
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
respBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name).
@@ -744,14 +716,12 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to write response")
}
log.Trace().
Caller().
Bool("noise", isNoise).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).

View File

@@ -1,61 +0,0 @@
//go:build ts2019
package hscontrol
import (
"io"
"net/http"
"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// RegistrationHandler handles the actual registration process of a machine
// Endpoint /machine/:mkey.
func (h *Headscale) RegistrationHandler(
writer http.ResponseWriter,
req *http.Request,
) {
vars := mux.Vars(req)
machineKeyStr, ok := vars["mkey"]
if !ok || machineKeyStr == "" {
log.Error().
Str("handler", "RegistrationHandler").
Msg("No machine ID in request")
http.Error(writer, "No machine ID in request", http.StatusBadRequest)
return
}
body, _ := io.ReadAll(req.Body)
var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(machineKeyStr)))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot parse machine key")
nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)
return
}
registerRequest := tailcfg.RegisterRequest{}
err = util.DecodeAndUnmarshalNaCl(body, &registerRequest, &machineKey, h.privateKey2019)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot decode message")
nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
http.Error(writer, "Cannot decode message", http.StatusBadRequest)
return
}
h.handleRegister(writer, req, registerRequest, machineKey, false)
}

View File

@@ -39,7 +39,19 @@ func (ns *noiseServer) NoiseRegistrationHandler(
return
}
// Reject unsupported versions
if registerRequest.Version < MinimumCapVersion {
log.Info().
Caller().
Int("min_version", int(MinimumCapVersion)).
Int("client_version", int(registerRequest.Version)).
Msg("unsupported client connected")
http.Error(writer, "Internal error", http.StatusBadRequest)
return
}
ns.nodeKey = registerRequest.NodeKey
ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer(), true)
ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer())
}
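Reviewer note: with the legacy handler deleted, the Noise handler becomes the only registration entry point, so the capability-version gate above is the single place old clients are rejected. A minimal sketch of the gate; the concrete MinimumCapVersion value is not shown in this hunk, so 58 here is only an assumption:

package sketch

import "tailscale.com/tailcfg"

// MinimumCapVersion stands in for headscale's cutoff; assumed value.
const MinimumCapVersion tailcfg.CapabilityVersion = 58

// supportedClient reports whether a register request passes the version
// gate before any registration work is attempted.
func supportedClient(req tailcfg.RegisterRequest) bool {
	return req.Version >= MinimumCapVersion
}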

View File

@@ -35,9 +35,6 @@ func (s *Suite) TestGetUsedIps(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -83,9 +80,6 @@ func (s *Suite) TestGetMultiIp(c *check.C) {
node := types.Node{
ID: uint64(index),
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -173,9 +167,6 @@ func (s *Suite) TestGetAvailableIpNodeWithoutIP(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,

View File

@@ -2,13 +2,16 @@ package db
import (
"context"
"database/sql"
"errors"
"fmt"
"net/netip"
"strings"
"sync"
"time"
"github.com/glebarez/sqlite"
"github.com/go-gormigrate/gormigrate/v2"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
@@ -19,15 +22,11 @@ import (
)
const (
dbVersion = "1"
Postgres = "postgres"
Sqlite = "sqlite3"
Postgres = "postgres"
Sqlite = "sqlite3"
)
var (
errValueNotFound = errors.New("not found")
errDatabaseNotSupported = errors.New("database type not supported")
)
var errDatabaseNotSupported = errors.New("database type not supported")
// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
@@ -62,6 +61,261 @@ func NewHeadscaleDatabase(
return nil, err
}
migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{
// New migrations should be added as transactions at the end of this list.
// The initial migration here is quite messy and completely out of order;
// it has no versioning and carries the tech debt of not having versioned
// migrations prior to this point. It contains all DB changes needed to
// bring a database up to 0.23.0.
{
ID: "202312101416",
Migrate: func(tx *gorm.DB) error {
if dbType == Postgres {
tx.Exec(`create extension if not exists "uuid-ossp";`)
}
_ = tx.Migrator().RenameTable("namespaces", "users")
// the big rename from Machine to Node
_ = tx.Migrator().RenameTable("machines", "nodes")
_ = tx.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id")
err = tx.AutoMigrate(types.User{})
if err != nil {
return err
}
_ = tx.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id")
_ = tx.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")
_ = tx.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
_ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname")
// GivenName is used as the primary source of DNS names; make sure
// the field is populated and normalized if it was not when the
// node was registered.
_ = tx.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name")
// If the Node table has a column for registered,
// find all occurrences of "false" and drop them. Then
// remove the column.
if tx.Migrator().HasColumn(&types.Node{}, "registered") {
log.Info().
Msg(`Database has legacy "registered" column in node, removing...`)
nodes := types.Nodes{}
if err := tx.Not("registered").Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for _, node := range nodes {
log.Info().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()).
Msg("Deleting unregistered node")
if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil {
log.Error().
Err(err).
Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()).
Msg("Error deleting unregistered node")
}
}
err := tx.Migrator().DropColumn(&types.Node{}, "registered")
if err != nil {
log.Error().Err(err).Msg("Error dropping registered column")
}
}
err = tx.AutoMigrate(&types.Route{})
if err != nil {
return err
}
err = tx.AutoMigrate(&types.Node{})
if err != nil {
return err
}
// Ensure all keys have correct prefixes
// https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35
type result struct {
ID uint64
MachineKey string
NodeKey string
DiscoKey string
}
var results []result
err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").Find(&results).Error
if err != nil {
return err
}
for _, node := range results {
mKey := node.MachineKey
if !strings.HasPrefix(node.MachineKey, "mkey:") {
mKey = "mkey:" + node.MachineKey
}
nKey := node.NodeKey
if !strings.HasPrefix(node.NodeKey, "nodekey:") {
nKey = "nodekey:" + node.NodeKey
}
dKey := node.DiscoKey
if !strings.HasPrefix(node.DiscoKey, "discokey:") {
dKey = "discokey:" + node.DiscoKey
}
err := tx.Exec(
"UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id",
sql.Named("mKey", mKey),
sql.Named("nKey", nKey),
sql.Named("dKey", dKey),
sql.Named("id", node.ID),
).Error
if err != nil {
return err
}
}
if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...")
type NodeAux struct {
ID uint64
EnabledRoutes types.IPPrefixes
}
nodesAux := []NodeAux{}
err := tx.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
if err != nil {
log.Fatal().Err(err).Msg("Error accessing db")
}
for _, node := range nodesAux {
for _, prefix := range node.EnabledRoutes {
if err != nil {
log.Error().
Err(err).
Str("enabled_route", prefix.String()).
Msg("Error parsing enabled_route")
continue
}
err = tx.Preload("Node").
Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)).
First(&types.Route{}).
Error
if err == nil {
log.Info().
Str("enabled_route", prefix.String()).
Msg("Route already migrated to new table, skipping")
continue
}
route := types.Route{
NodeID: node.ID,
Advertised: true,
Enabled: true,
Prefix: types.IPPrefix(prefix),
}
if err := tx.Create(&route).Error; err != nil {
log.Error().Err(err).Msg("Error creating route")
} else {
log.Info().
Uint64("node_id", route.NodeID).
Str("prefix", prefix.String()).
Msg("Route migrated")
}
}
}
err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes")
if err != nil {
log.Error().Err(err).Msg("Error dropping enabled_routes column")
}
}
if tx.Migrator().HasColumn(&types.Node{}, "given_name") {
nodes := types.Nodes{}
if err := tx.Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for item, node := range nodes {
if node.GivenName == "" {
normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper(
node.Hostname,
)
if err != nil {
log.Error().
Caller().
Str("hostname", node.Hostname).
Err(err).
Msg("Failed to normalize node hostname in DB migration")
}
err = tx.Model(nodes[item]).Updates(types.Node{
GivenName: normalizedHostname,
}).Error
if err != nil {
log.Error().
Caller().
Str("hostname", node.Hostname).
Err(err).
Msg("Failed to save normalized node name in DB migration")
}
}
}
}
err = tx.AutoMigrate(&KV{})
if err != nil {
return err
}
err = tx.AutoMigrate(&types.PreAuthKey{})
if err != nil {
return err
}
err = tx.AutoMigrate(&types.PreAuthKeyACLTag{})
if err != nil {
return err
}
_ = tx.Migrator().DropTable("shared_machines")
err = tx.AutoMigrate(&types.APIKey{})
if err != nil {
return err
}
return nil
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
{
// drop key-value table, it is not used, and has not contained
// useful data for a long time or ever.
ID: "202312101430",
Migrate: func(tx *gorm.DB) error {
return tx.Migrator().DropTable("kvs")
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
})
if err = migrations.Migrate(); err != nil {
log.Fatal().Err(err).Msgf("Migration failed: %v", err)
}
db := HSDatabase{
db: dbConn,
notifier: notifier,
@@ -70,191 +324,6 @@ func NewHeadscaleDatabase(
baseDomain: baseDomain,
}
log.Debug().Msgf("database %#v", dbConn)
if dbType == Postgres {
dbConn.Exec(`create extension if not exists "uuid-ossp";`)
}
_ = dbConn.Migrator().RenameTable("namespaces", "users")
// the big rename from Machine to Node
_ = dbConn.Migrator().RenameTable("machines", "nodes")
_ = dbConn.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id")
err = dbConn.AutoMigrate(types.User{})
if err != nil {
return nil, err
}
_ = dbConn.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id")
_ = dbConn.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")
_ = dbConn.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
_ = dbConn.Migrator().RenameColumn(&types.Node{}, "name", "hostname")
// GivenName is used as the primary source of DNS names, make sure
// the field is populated and normalized if it was not when the
// node was registered.
_ = dbConn.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name")
// If the Node table has a column for registered,
// find all occurrences of "false" and drop them. Then
// remove the column.
if dbConn.Migrator().HasColumn(&types.Node{}, "registered") {
log.Info().
Msg(`Database has legacy "registered" column in node, removing...`)
nodes := types.Nodes{}
if err := dbConn.Not("registered").Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for _, node := range nodes {
log.Info().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey).
Msg("Deleting unregistered node")
if err := dbConn.Delete(&types.Node{}, node.ID).Error; err != nil {
log.Error().
Err(err).
Str("node", node.Hostname).
Str("machine_key", node.MachineKey).
Msg("Error deleting unregistered node")
}
}
err := dbConn.Migrator().DropColumn(&types.Node{}, "registered")
if err != nil {
log.Error().Err(err).Msg("Error dropping registered column")
}
}
err = dbConn.AutoMigrate(&types.Route{})
if err != nil {
return nil, err
}
if dbConn.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...")
type NodeAux struct {
ID uint64
EnabledRoutes types.IPPrefixes
}
nodesAux := []NodeAux{}
err := dbConn.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
if err != nil {
log.Fatal().Err(err).Msg("Error accessing db")
}
for _, node := range nodesAux {
for _, prefix := range node.EnabledRoutes {
if err != nil {
log.Error().
Err(err).
Str("enabled_route", prefix.String()).
Msg("Error parsing enabled_route")
continue
}
err = dbConn.Preload("Node").
Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)).
First(&types.Route{}).
Error
if err == nil {
log.Info().
Str("enabled_route", prefix.String()).
Msg("Route already migrated to new table, skipping")
continue
}
route := types.Route{
NodeID: node.ID,
Advertised: true,
Enabled: true,
Prefix: types.IPPrefix(prefix),
}
if err := dbConn.Create(&route).Error; err != nil {
log.Error().Err(err).Msg("Error creating route")
} else {
log.Info().
Uint64("node_id", route.NodeID).
Str("prefix", prefix.String()).
Msg("Route migrated")
}
}
}
err = dbConn.Migrator().DropColumn(&types.Node{}, "enabled_routes")
if err != nil {
log.Error().Err(err).Msg("Error dropping enabled_routes column")
}
}
err = dbConn.AutoMigrate(&types.Node{})
if err != nil {
return nil, err
}
if dbConn.Migrator().HasColumn(&types.Node{}, "given_name") {
nodes := types.Nodes{}
if err := dbConn.Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for item, node := range nodes {
if node.GivenName == "" {
normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper(
node.Hostname,
)
if err != nil {
log.Error().
Caller().
Str("hostname", node.Hostname).
Err(err).
Msg("Failed to normalize node hostname in DB migration")
}
err = db.RenameNode(nodes[item], normalizedHostname)
if err != nil {
log.Error().
Caller().
Str("hostname", node.Hostname).
Err(err).
Msg("Failed to save normalized node name in DB migration")
}
}
}
}
err = dbConn.AutoMigrate(&KV{})
if err != nil {
return nil, err
}
err = dbConn.AutoMigrate(&types.PreAuthKey{})
if err != nil {
return nil, err
}
err = dbConn.AutoMigrate(&types.PreAuthKeyACLTag{})
if err != nil {
return nil, err
}
_ = dbConn.Migrator().DropTable("shared_machines")
err = dbConn.AutoMigrate(&types.APIKey{})
if err != nil {
return nil, err
}
// TODO(kradalby): is this needed?
err = db.setValue("db_version", dbVersion)
return &db, err
}
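Reviewer note: the block above replaces the old ad-hoc AutoMigrate/Rename calls with gormigrate steps keyed by timestamp IDs, so a database can be brought forward one recorded step at a time. A minimal sketch of the same pattern; the Example table is hypothetical and not part of headscale:

package sketch

import (
	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// runMigrations shows the versioned-migration convention introduced above:
// each step carries a timestamp-style ID, gormigrate records which IDs have
// been applied, and new steps are appended at the end of the list.
func runMigrations(db *gorm.DB) error {
	type Example struct {
		ID   uint64
		Name string
	}

	m := gormigrate.New(db, gormigrate.DefaultOptions, []*gormigrate.Migration{
		{
			ID: "202312101500", // timestamp ID, matching the convention above
			Migrate: func(tx *gorm.DB) error {
				return tx.AutoMigrate(&Example{})
			},
			Rollback: func(tx *gorm.DB) error {
				return tx.Migrator().DropTable("examples")
			},
		},
	})

	return m.Migrate()
}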
@@ -304,39 +373,6 @@ func openDB(dbType, connectionAddr string, debug bool) (*gorm.DB, error) {
)
}
// getValue returns the value for the given key in KV.
func (hsdb *HSDatabase) getValue(key string) (string, error) {
var row KV
if result := hsdb.db.First(&row, "key = ?", key); errors.Is(
result.Error,
gorm.ErrRecordNotFound,
) {
return "", errValueNotFound
}
return row.Value, nil
}
// setValue sets value for the given key in KV.
func (hsdb *HSDatabase) setValue(key string, value string) error {
keyValue := KV{
Key: key,
Value: value,
}
if _, err := hsdb.getValue(key); err == nil {
hsdb.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
return nil
}
if err := hsdb.db.Create(keyValue).Error; err != nil {
return fmt.Errorf("failed to create key value pair in the database: %w", err)
}
return nil
}
func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()

View File

@@ -55,17 +55,12 @@ func (hsdb *HSDatabase) listPeers(node *types.Node) (types.Nodes, error) {
Preload("User").
Preload("Routes").
Where("node_key <> ?",
node.NodeKey).Find(&nodes).Error; err != nil {
node.NodeKey.String()).Find(&nodes).Error; err != nil {
return types.Nodes{}, err
}
sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID })
log.Trace().
Caller().
Str("node", node.Hostname).
Msgf("Found peers: %s", nodes.String())
return nodes, nil
}
@@ -176,13 +171,19 @@ func (hsdb *HSDatabase) GetNodeByMachineKey(
hsdb.mu.RLock()
defer hsdb.mu.RUnlock()
return hsdb.getNodeByMachineKey(machineKey)
}
func (hsdb *HSDatabase) getNodeByMachineKey(
machineKey key.MachinePublic,
) (*types.Node, error) {
mach := types.Node{}
if result := hsdb.db.
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Preload("Routes").
First(&mach, "machine_key = ?", util.MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
First(&mach, "machine_key = ?", machineKey.String()); result.Error != nil {
return nil, result.Error
}
@@ -203,7 +204,7 @@ func (hsdb *HSDatabase) GetNodeByNodeKey(
Preload("User").
Preload("Routes").
First(&node, "node_key = ?",
util.NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
nodeKey.String()); result.Error != nil {
return nil, result.Error
}
@@ -224,9 +225,9 @@ func (hsdb *HSDatabase) GetNodeByAnyKey(
Preload("User").
Preload("Routes").
First(&node, "machine_key = ? OR node_key = ? OR node_key = ?",
util.MachinePublicKeyStripPrefix(machineKey),
util.NodePublicKeyStripPrefix(nodeKey),
util.NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {
machineKey.String(),
nodeKey.String(),
oldNodeKey.String()); result.Error != nil {
return nil, result.Error
}
@@ -252,6 +253,10 @@ func (hsdb *HSDatabase) SetTags(
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
if len(tags) == 0 {
return nil
}
newTags := []string{}
for _, tag := range tags {
if !util.StringOrPrefixListContains(newTags, tag) {
@@ -265,10 +270,14 @@ func (hsdb *HSDatabase) SetTags(
return fmt.Errorf("failed to update tags for node in the database: %w", err)
}
hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
}, node.MachineKey)
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from db.SetTags",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil
}
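Reviewer note: SetTags and RenameNode now share a construct/validate/notify pattern instead of firing the notifier inline. A sketch of that pattern; the Notifier method signature is assumed from the calls visible in this diff:

package sketch

import (
	"github.com/juanfont/headscale/hscontrol/notifier"
	"github.com/juanfont/headscale/hscontrol/types"
)

// notifyPeersAboutNode builds a named StateUpdate, validates it, and sends
// it to everyone except the originating node, which already knows.
func notifyPeersAboutNode(n *notifier.Notifier, node *types.Node, reason string) {
	stateUpdate := types.StateUpdate{
		Type:        types.StatePeerChanged,
		ChangeNodes: types.Nodes{node},
		Message:     reason, // e.g. "called from db.SetTags"
	}
	// Valid() guards against broadcasting a half-built update.
	if stateUpdate.Valid() {
		n.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
	}
}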
@@ -301,10 +310,14 @@ func (hsdb *HSDatabase) RenameNode(node *types.Node, newName string) error {
return fmt.Errorf("failed to rename node in the database: %w", err)
}
hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
}, node.MachineKey)
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from db.RenameNode",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil
}
@@ -327,10 +340,28 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
)
}
hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
}, node.MachineKey)
node.Expiry = &expiry
stateSelfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &expiry,
},
},
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil
}
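Reviewer note: nodeSetExpiry now sends two different updates, and the split matters: the node itself needs a full self-update so its own netmap reflects the new expiry, while peers only need a lightweight key-expiry patch. A sketch of the pair, with signatures assumed from the calls in this diff:

package sketch

import (
	"time"

	"github.com/juanfont/headscale/hscontrol/notifier"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

// notifyExpiry sends a full StateSelfUpdate to the expiring node and a
// minimal PeerChange patch to everyone else.
func notifyExpiry(n *notifier.Notifier, node *types.Node, expiry time.Time) {
	selfUpdate := types.StateUpdate{
		Type:        types.StateSelfUpdate,
		ChangeNodes: types.Nodes{node},
	}
	if selfUpdate.Valid() {
		n.NotifyByMachineKey(selfUpdate, node.MachineKey)
	}

	peerUpdate := types.StateUpdate{
		Type: types.StatePeerChangedPatch,
		ChangePatches: []*tailcfg.PeerChange{
			{NodeID: tailcfg.NodeID(node.ID), KeyExpiry: &expiry},
		},
	}
	if peerUpdate.Valid() {
		n.NotifyWithIgnore(peerUpdate, node.MachineKey.String())
	}
}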
@@ -354,10 +385,13 @@ func (hsdb *HSDatabase) deleteNode(node *types.Node) error {
return err
}
hsdb.notifier.NotifyAll(types.StateUpdate{
stateUpdate := types.StateUpdate{
Type: types.StatePeerRemoved,
Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)},
})
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil
}
@@ -376,7 +410,7 @@ func (hsdb *HSDatabase) UpdateLastSeen(node *types.Node) error {
func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
cache *cache.Cache,
nodeKeyStr string,
mkey key.MachinePublic,
userName string,
nodeExpiry *time.Time,
registrationMethod string,
@@ -384,20 +418,14 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
nodeKey := key.NodePublic{}
err := nodeKey.UnmarshalText([]byte(nodeKeyStr))
if err != nil {
return nil, err
}
log.Debug().
Str("nodeKey", nodeKey.ShortString()).
Str("machine_key", mkey.ShortString()).
Str("userName", userName).
Str("registrationMethod", registrationMethod).
Str("expiresAt", fmt.Sprintf("%v", nodeExpiry)).
Msg("Registering node from API/CLI or auth callback")
if nodeInterface, ok := cache.Get(util.NodePublicKeyStripPrefix(nodeKey)); ok {
if nodeInterface, ok := cache.Get(mkey.String()); ok {
if registrationNode, ok := nodeInterface.(types.Node); ok {
user, err := hsdb.getUser(userName)
if err != nil {
@@ -425,7 +453,7 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
)
if err == nil {
cache.Delete(nodeKeyStr)
cache.Delete(mkey.String())
}
return node, err
@@ -448,8 +476,8 @@ func (hsdb *HSDatabase) RegisterNode(node types.Node) (*types.Node, error) {
func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
log.Debug().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey).
Str("node_key", node.NodeKey).
Str("machine_key", node.MachineKey.ShortString()).
Str("node_key", node.NodeKey.ShortString()).
Str("user", node.User.Name).
Msg("Registering node")
@@ -464,8 +492,8 @@ func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
log.Trace().
Caller().
Str("node", node.Hostname).
Str("machine_key", node.MachineKey).
Str("node_key", node.NodeKey).
Str("machine_key", node.MachineKey.ShortString()).
Str("node_key", node.NodeKey.ShortString()).
Str("user", node.User.Name).
Msg("Node authorized again")
@@ -507,7 +535,7 @@ func (hsdb *HSDatabase) NodeSetNodeKey(node *types.Node, nodeKey key.NodePublic)
defer hsdb.mu.Unlock()
if err := hsdb.db.Model(node).Updates(types.Node{
NodeKey: util.NodePublicKeyStripPrefix(nodeKey),
NodeKey: nodeKey,
}).Error; err != nil {
return err
}
@@ -524,7 +552,7 @@ func (hsdb *HSDatabase) NodeSetMachineKey(
defer hsdb.mu.Unlock()
if err := hsdb.db.Model(node).Updates(types.Node{
MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
MachineKey: machineKey,
}).Error; err != nil {
return err
}
@@ -635,20 +663,6 @@ func (hsdb *HSDatabase) IsRoutesEnabled(node *types.Node, routeStr string) bool
return false
}
func (hsdb *HSDatabase) ListOnlineNodes(
node *types.Node,
) (map[tailcfg.NodeID]bool, error) {
hsdb.mu.RLock()
defer hsdb.mu.RUnlock()
peers, err := hsdb.listPeers(node)
if err != nil {
return nil, err
}
return peers.OnlineNodeMap(), nil
}
// enableRoutes enables new routes based on a list of new routes.
func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) error {
newRoutes := make([]netip.Prefix, len(routeStrs))
@@ -700,10 +714,43 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
}
}
hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
}, node.MachineKey)
// Ensure the node has the latest routes when notifying the other
// nodes
nRoutes, err := hsdb.getNodeRoutes(node)
if err != nil {
return fmt.Errorf("failed to read back routes: %w", err)
}
node.Routes = nRoutes
log.Trace().
Caller().
Str("node", node.Hostname).
Strs("routes", routeStrs).
Msg("enabling routes")
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from db.enableRoutes",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(
stateUpdate, node.MachineKey.String())
}
// Send an update to the node itself to ensure it
// has an updated packetfilter allowing the new route
// if it is defined in the ACL.
selfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if selfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(
selfUpdate,
node.MachineKey)
}
return nil
}
@@ -734,7 +781,10 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
return normalizedHostname, nil
}
func (hsdb *HSDatabase) GenerateGivenName(machineKey string, suppliedName string) (string, error) {
func (hsdb *HSDatabase) GenerateGivenName(
mkey key.MachinePublic,
suppliedName string,
) (string, error) {
hsdb.mu.RLock()
defer hsdb.mu.RUnlock()
@@ -749,17 +799,22 @@ func (hsdb *HSDatabase) GenerateGivenName(machineKey string, suppliedName string
return "", err
}
for _, node := range nodes {
if node.MachineKey != machineKey && node.GivenName == givenName {
postfixedName, err := generateGivenName(suppliedName, true)
if err != nil {
return "", err
}
givenName = postfixedName
var nodeFound *types.Node
for idx, node := range nodes {
if node.GivenName == givenName {
nodeFound = nodes[idx]
}
}
if nodeFound != nil && nodeFound.MachineKey.String() != mkey.String() {
postfixedName, err := generateGivenName(suppliedName, true)
if err != nil {
return "", err
}
givenName = postfixedName
}
return givenName, nil
}
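Reviewer note: the rewritten GenerateGivenName first looks for an existing owner of the name and only appends a suffix when that owner is a *different* machine, so re-registering the same node keeps its name. A stdlib-only sketch of the conflict rule; the 4-byte suffix length is an assumption standing in for NodeGivenNameHashLength:

package sketch

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// dedupeGivenName keeps the supplied name unless another machine already
// holds it, in which case a short random suffix is appended.
func dedupeGivenName(name string, takenByOtherMachine bool) (string, error) {
	if !takenByOtherMachine {
		return name, nil
	}

	suffix := make([]byte, 4)
	if _, err := rand.Read(suffix); err != nil {
		return "", err
	}

	return fmt.Sprintf("%s-%s", name, hex.EncodeToString(suffix)), nil
}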
@@ -824,52 +879,69 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
// checked everything.
started := time.Now()
users, err := hsdb.listUsers()
expiredNodes := make([]*types.Node, 0)
nodes, err := hsdb.listNodes()
if err != nil {
log.Error().Err(err).Msg("Error listing users")
log.Error().
Err(err).
Msg("Error listing nodes to find expired nodes")
return time.Unix(0, 0)
}
for index, node := range nodes {
if node.IsExpired() &&
// TODO(kradalby): Replace this, it is very spammy
// It will notify about all nodes that have expired.
// It should only notify about expired nodes since _last check_.
node.Expiry.After(lastCheck) {
expiredNodes = append(expiredNodes, &nodes[index])
for _, user := range users {
nodes, err := hsdb.listNodesByUser(user.Name)
if err != nil {
log.Error().
Err(err).
Str("user", user.Name).
Msg("Error listing nodes in user")
return time.Unix(0, 0)
}
expired := make([]tailcfg.NodeID, 0)
for index, node := range nodes {
if node.IsExpired() &&
node.Expiry.After(lastCheck) {
expired = append(expired, tailcfg.NodeID(node.ID))
now := time.Now()
err := hsdb.nodeSetExpiry(nodes[index], now)
if err != nil {
log.Error().
Err(err).
Str("node", node.Hostname).
Str("name", node.GivenName).
Msg("🤮 Cannot expire node")
} else {
log.Info().
Str("node", node.Hostname).
Str("name", node.GivenName).
Msg("Node successfully expired")
}
// Do not use setNodeExpiry as that has a notifier hook, which
// can cause a deadlock, we are updating all changed nodes later
// and there is no point in notifying twice.
if err := hsdb.db.Model(&nodes[index]).Updates(types.Node{
Expiry: &started,
}).Error; err != nil {
log.Error().
Err(err).
Str("node", node.Hostname).
Str("name", node.GivenName).
Msg("🤮 Cannot expire node")
} else {
log.Info().
Str("node", node.Hostname).
Str("name", node.GivenName).
Msg("Node successfully expired")
}
}
}
if len(expired) > 0 {
hsdb.notifier.NotifyAll(types.StateUpdate{
Type: types.StatePeerRemoved,
Removed: expired,
})
expired := make([]*tailcfg.PeerChange, len(expiredNodes))
for idx, node := range expiredNodes {
expired[idx] = &tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &started,
}
}
// Inform the peers of a node with a lightweight update.
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: expired,
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
// Inform the node itself that it has expired.
for _, node := range expiredNodes {
stateSelfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
}
}
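Reviewer note: ExpireExpiredNodes now deliberately bypasses nodeSetExpiry, writing the expiry column directly and folding every expiry into one patch list, so peers receive a single NotifyAll rather than one broadcast per node (and the notifier-hook deadlock is avoided). A sketch of the batching step:

package sketch

import (
	"time"

	"tailscale.com/tailcfg"
)

// batchExpiryPatches builds one lightweight patch per expired node so the
// caller can inform all peers with a single notification.
func batchExpiryPatches(expiredIDs []uint64, started time.Time) []*tailcfg.PeerChange {
	patches := make([]*tailcfg.PeerChange, 0, len(expiredIDs))
	for _, id := range expiredIDs {
		patches = append(patches, &tailcfg.PeerChange{
			NodeID:    tailcfg.NodeID(id),
			KeyExpiry: &started,
		})
	}

	return patches
}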

View File

@@ -12,6 +12,7 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"gopkg.in/check.v1"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
@@ -25,11 +26,13 @@ func (s *Suite) TestGetNode(c *check.C) {
_, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -51,11 +54,13 @@ func (s *Suite) TestGetNodeByID(c *check.C) {
_, err = db.GetNodeByID(0)
c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -82,9 +87,8 @@ func (s *Suite) TestGetNodeByNodeKey(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: util.MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -113,9 +117,8 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: util.MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -130,11 +133,14 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
func (s *Suite) TestHardDeleteNode(c *check.C) {
user, err := db.CreateUser("test")
c.Assert(err, check.IsNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode3",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -160,11 +166,13 @@ func (s *Suite) TestListPeers(c *check.C) {
c.Assert(err, check.NotNil)
for index := 0; index <= 10; index++ {
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{
ID: uint64(index),
MachineKey: "foo" + strconv.Itoa(index),
NodeKey: "bar" + strconv.Itoa(index),
DiscoKey: "faa" + strconv.Itoa(index),
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode" + strconv.Itoa(index),
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -205,11 +213,13 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
c.Assert(err, check.NotNil)
for index := 0; index <= 10; index++ {
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{
ID: uint64(index),
MachineKey: "foo" + strconv.Itoa(index),
NodeKey: "bar" + strconv.Itoa(index),
DiscoKey: "faa" + strconv.Itoa(index),
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
IPAddresses: types.NodeAddresses{
netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))),
},
@@ -288,11 +298,13 @@ func (s *Suite) TestExpireNode(c *check.C) {
_, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -345,11 +357,15 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
_, err = db.GetNode("user-1", "testnode")
c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
machineKey2 := key.NewMachine()
node := &types.Node{
ID: 0,
MachineKey: "node-key-1",
NodeKey: "node-key-1",
DiscoKey: "disco-key-1",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "hostname-1",
GivenName: "hostname-1",
UserID: user1.ID,
@@ -358,25 +374,20 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
}
db.db.Save(node)
givenName, err := db.GenerateGivenName("node-key-2", "hostname-2")
givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2")
comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-2", comment)
givenName, err = db.GenerateGivenName("node-key-1", "hostname-1")
givenName, err = db.GenerateGivenName(machineKey.Public(), "hostname-1")
comment = check.Commentf("Same user, same node, same hostname, no conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-1", comment)
givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
givenName, err = db.GenerateGivenName(machineKey2.Public(), "hostname-1")
comment = check.Commentf("Same user, unique nodes, same hostname, conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)
givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
comment = check.Commentf("Unique users, unique nodes, same hostname, conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)
}
func (s *Suite) TestSetTags(c *check.C) {
@@ -389,11 +400,13 @@ func (s *Suite) TestSetTags(c *check.C) {
_, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -565,6 +578,7 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
c.Assert(err, check.IsNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
defaultRouteV4 := netip.MustParsePrefix("0.0.0.0/0")
defaultRouteV6 := netip.MustParsePrefix("::/0")
@@ -574,14 +588,13 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "test",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:exit"},
RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2},
},
@@ -590,8 +603,9 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
db.db.Save(&node)
err = db.SaveNodeRoutes(&node)
sendUpdate, err := db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
node0ByID, err := db.GetNodeByID(0)
c.Assert(err, check.IsNil)

View File

@@ -77,9 +77,6 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -101,9 +98,6 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
node := types.Node{
ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -138,9 +132,6 @@ func (*Suite) TestEphemeralKey(c *check.C) {
now := time.Now().Add(-time.Second * 30)
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -207,5 +198,5 @@ func (*Suite) TestPreAuthKeyACLTags(c *check.C) {
listedPaks, err := db.ListPreAuthKeys("test8")
c.Assert(err, check.IsNil)
c.Assert(listedPaks[0].Proto().AclTags, check.DeepEquals, tags)
c.Assert(listedPaks[0].Proto().GetAclTags(), check.DeepEquals, tags)
}
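Reviewer note: the swap from `.AclTags` to `.GetAclTags()` throughout this diff leans on a property of generated protobuf code: getters are nil-receiver safe. A reduced, hand-written illustration of what protoc-gen-go emits; this type is a stand-in, not headscale's actual generated message:

package sketch

// preAuthKeyMsg stands in for a generated protobuf message.
type preAuthKeyMsg struct {
	AclTags []string
}

// GetAclTags returns the zero value when the receiver is nil instead of
// panicking, so callers can chain it without a nil check.
func (x *preAuthKeyMsg) GetAclTags() []string {
	if x != nil {
		return x.AclTags
	}

	return nil
}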

View File

@@ -7,7 +7,9 @@ import (
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"gorm.io/gorm"
"tailscale.com/types/key"
)
var ErrRouteIsNotAvailable = errors.New("route is not available")
@@ -21,7 +23,38 @@ func (hsdb *HSDatabase) GetRoutes() (types.Routes, error) {
func (hsdb *HSDatabase) getRoutes() (types.Routes, error) {
var routes types.Routes
err := hsdb.db.Preload("Node").Find(&routes).Error
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) getAdvertisedAndEnabledRoutes() (types.Routes, error) {
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("advertised = ? AND enabled = ?", true, true).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) getRoutesByPrefix(pref netip.Prefix) (types.Routes, error) {
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("prefix = ?", types.IPPrefix(pref)).
Find(&routes).Error
if err != nil {
return nil, err
}
@@ -40,6 +73,7 @@ func (hsdb *HSDatabase) getNodeAdvertisedRoutes(node *types.Node) (types.Routes,
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("node_id = ? AND advertised = true", node.ID).
Find(&routes).Error
if err != nil {
@@ -60,6 +94,7 @@ func (hsdb *HSDatabase) getNodeRoutes(node *types.Node) (types.Routes, error) {
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("node_id = ?", node.ID).
Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
@@ -78,7 +113,10 @@ func (hsdb *HSDatabase) GetRoute(id uint64) (*types.Route, error) {
func (hsdb *HSDatabase) getRoute(id uint64) (*types.Route, error) {
var route types.Route
err := hsdb.db.Preload("Node").First(&route, id).Error
err := hsdb.db.
Preload("Node").
Preload("Node.User").
First(&route, id).Error
if err != nil {
return nil, err
}
@@ -122,37 +160,61 @@ func (hsdb *HSDatabase) DisableRoute(id uint64) error {
return err
}
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
if !route.IsExitRoute() {
err = hsdb.failoverRouteWithNotify(route)
if err != nil {
return err
}
route.Enabled = false
route.IsPrimary = false
err = hsdb.db.Save(route).Error
if err != nil {
return err
}
} else {
routes, err = hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
return hsdb.handlePrimarySubnetFailover()
}
routes, err := hsdb.getNodeRoutes(&route.Node)
if err != nil {
return err
}
for i := range routes {
if routes[i].IsExitRoute() {
routes[i].Enabled = false
routes[i].IsPrimary = false
err = hsdb.db.Save(&routes[i]).Error
if err != nil {
return err
for i := range routes {
if routes[i].IsExitRoute() {
routes[i].Enabled = false
routes[i].IsPrimary = false
err = hsdb.db.Save(&routes[i]).Error
if err != nil {
return err
}
}
}
}
return hsdb.handlePrimarySubnetFailover()
if routes == nil {
routes, err = hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
}
node.Routes = routes
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{&node},
Message: "called from db.DisableRoute",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil
}
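Reviewer note: DisableRoute branches on exit routes because Tailscale expects the IPv4 and IPv6 exit prefixes to be toggled together, per juanfont/headscale#804. A minimal stdlib version of the check the handlers above rely on:

package sketch

import "net/netip"

// Exit routes come as a pair that must be enabled or disabled together.
var (
	exitRouteV4 = netip.MustParsePrefix("0.0.0.0/0")
	exitRouteV6 = netip.MustParsePrefix("::/0")
)

// isExitRoute reports whether a prefix is one half of the exit-node pair.
func isExitRoute(p netip.Prefix) bool {
	return p == exitRouteV4 || p == exitRouteV6
}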
func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
@@ -164,34 +226,58 @@ func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
return err
}
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
if !route.IsExitRoute() {
err := hsdb.failoverRouteWithNotify(route)
if err != nil {
return nil
}
if err := hsdb.db.Unscoped().Delete(&route).Error; err != nil {
return err
}
} else {
routes, err := hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
return hsdb.handlePrimarySubnetFailover()
}
routesToDelete := types.Routes{}
for _, r := range routes {
if r.IsExitRoute() {
routesToDelete = append(routesToDelete, r)
}
}
routes, err := hsdb.getNodeRoutes(&route.Node)
if err != nil {
return err
}
routesToDelete := types.Routes{}
for _, r := range routes {
if r.IsExitRoute() {
routesToDelete = append(routesToDelete, r)
if err := hsdb.db.Unscoped().Delete(&routesToDelete).Error; err != nil {
return err
}
}
if err := hsdb.db.Unscoped().Delete(&routesToDelete).Error; err != nil {
return err
if routes == nil {
routes, err = hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
}
return hsdb.handlePrimarySubnetFailover()
node.Routes = routes
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{&node},
Message: "called from db.DeleteRoute",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil
}
func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {
@@ -204,9 +290,13 @@ func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {
if err := hsdb.db.Unscoped().Delete(&routes[i]).Error; err != nil {
return err
}
// TODO(kradalby): This is a bit too aggressive, we could probably
// figure out which routes need to be failed over rather than all.
hsdb.failoverRouteWithNotify(&routes[i])
}
return hsdb.handlePrimarySubnetFailover()
return nil
}
// isUniquePrefix returns if there is another node providing the same route already.
@@ -259,22 +349,26 @@ func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, er
// SaveNodeRoutes takes a node and updates the database with
// the new routes.
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) error {
// It returns a bool indicating whether an update should be sent, as the
// saved route impacts nodes.
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
return hsdb.saveNodeRoutes(node)
}
func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
sendUpdate := false
currentRoutes := types.Routes{}
err := hsdb.db.Where("node_id = ?", node.ID).Find(&currentRoutes).Error
if err != nil {
return err
return sendUpdate, err
}
advertisedRoutes := map[netip.Prefix]bool{}
for _, prefix := range node.HostInfo.RoutableIPs {
for _, prefix := range node.Hostinfo.RoutableIPs {
advertisedRoutes[prefix] = false
}
@@ -290,7 +384,14 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
currentRoutes[pos].Advertised = true
err := hsdb.db.Save(&currentRoutes[pos]).Error
if err != nil {
return err
return sendUpdate, err
}
// If a route that is newly "saved" is already
// enabled, set sendUpdate to true as it is now
// available.
if route.Enabled {
sendUpdate = true
}
}
advertisedRoutes[netip.Prefix(route.Prefix)] = true
@@ -299,7 +400,7 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
currentRoutes[pos].Enabled = false
err := hsdb.db.Save(&currentRoutes[pos]).Error
if err != nil {
return err
return sendUpdate, err
}
}
}
@@ -314,7 +415,41 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
}
err := hsdb.db.Create(&route).Error
if err != nil {
return err
return sendUpdate, err
}
}
}
return sendUpdate, nil
}
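Reviewer note: SaveNodeRoutes changes from a single error return to (bool, error), and every caller in this diff is updated to check the flag. A sketch of the new contract from the caller's side; the interface and notify callback are illustrative stand-ins:

package sketch

import "github.com/juanfont/headscale/hscontrol/types"

// routeSaver abstracts the new two-value contract: the bool is true when a
// freshly advertised route was already enabled, i.e. it just became
// routable and peers should be told.
type routeSaver interface {
	SaveNodeRoutes(*types.Node) (bool, error)
}

// saveAndMaybeNotify shows how callers react to the returned flag.
func saveAndMaybeNotify(s routeSaver, node *types.Node, notify func()) error {
	sendUpdate, err := s.SaveNodeRoutes(node)
	if err != nil {
		return err
	}

	if sendUpdate {
		notify()
	}

	return nil
}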
// EnsureFailoverRouteIsAvailable takes a node and checks if the node's routes
// currently have a functioning host that exposes the network.
func (hsdb *HSDatabase) EnsureFailoverRouteIsAvailable(node *types.Node) error {
nodeRoutes, err := hsdb.getNodeRoutes(node)
if err != nil {
return nil
}
for _, nodeRoute := range nodeRoutes {
routes, err := hsdb.getRoutesByPrefix(netip.Prefix(nodeRoute.Prefix))
if err != nil {
return err
}
for _, route := range routes {
if route.IsPrimary {
// if we have a primary route and the node is connected,
// nothing needs to be done.
if hsdb.notifier.IsConnected(route.Node.MachineKey) {
continue
}
// if not, we need to failover the route
err := hsdb.failoverRouteWithNotify(&route)
if err != nil {
return err
}
}
}
}
@@ -322,133 +457,185 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
return nil
}
func (hsdb *HSDatabase) HandlePrimarySubnetFailover() error {
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
return hsdb.handlePrimarySubnetFailover()
}
func (hsdb *HSDatabase) handlePrimarySubnetFailover() error {
// first, get all the enabled routes
var routes types.Routes
err := hsdb.db.
Preload("Node").
Where("advertised = ? AND enabled = ?", true, true).
Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().Err(err).Msg("error getting routes")
func (hsdb *HSDatabase) FailoverNodeRoutesWithNotify(node *types.Node) error {
routes, err := hsdb.getNodeRoutes(node)
if err != nil {
return nil
}
changedNodes := make(types.Nodes, 0)
for pos, route := range routes {
if route.IsExitRoute() {
var changedKeys []key.MachinePublic
for _, route := range routes {
changed, err := hsdb.failoverRoute(&route)
if err != nil {
return err
}
changedKeys = append(changedKeys, changed...)
}
changedKeys = lo.Uniq(changedKeys)
var nodes types.Nodes
for _, key := range changedKeys {
node, err := hsdb.GetNodeByMachineKey(key)
if err != nil {
return err
}
nodes = append(nodes, node)
}
if nodes != nil {
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: nodes,
Message: "called from db.FailoverNodeRoutesWithNotify",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
}
return nil
}
func (hsdb *HSDatabase) failoverRouteWithNotify(r *types.Route) error {
changedKeys, err := hsdb.failoverRoute(r)
if err != nil {
return err
}
if len(changedKeys) == 0 {
return nil
}
var nodes types.Nodes
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("loading machines with new primary routes from db")
for _, key := range changedKeys {
node, err := hsdb.getNodeByMachineKey(key)
if err != nil {
return err
}
nodes = append(nodes, node)
}
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("notifying peers about primary route change")
if nodes != nil {
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: nodes,
Message: "called from db.failoverRouteWithNotify",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
}
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("notified peers about primary route change")
return nil
}
// failoverRoute takes a route that is no longer available,
// this can be either from:
// - being disabled
// - being deleted
// - host going offline
//
// and tries to find a new route to take over its place.
// If the given route was not primary, it returns early.
func (hsdb *HSDatabase) failoverRoute(r *types.Route) ([]key.MachinePublic, error) {
if r == nil {
return nil, nil
}
// This route is not a primary route, and it isn't
// being served to nodes.
if !r.IsPrimary {
return nil, nil
}
// We do not have to failover exit nodes
if r.IsExitRoute() {
return nil, nil
}
routes, err := hsdb.getRoutesByPrefix(netip.Prefix(r.Prefix))
if err != nil {
return nil, err
}
var newPrimary *types.Route
// Find a new suitable route
for idx, route := range routes {
if r.ID == route.ID {
continue
}
node := &route.Node
if !route.IsPrimary {
_, err := hsdb.getPrimaryRoute(netip.Prefix(route.Prefix))
if hsdb.isUniquePrefix(route) || errors.Is(err, gorm.ErrRecordNotFound) {
log.Info().
Str("prefix", netip.Prefix(route.Prefix).String()).
Str("node", route.Node.GivenName).
Msg("Setting primary route")
routes[pos].IsPrimary = true
err := hsdb.db.Save(&routes[pos]).Error
if err != nil {
log.Error().Err(err).Msg("error marking route as primary")
return err
}
changedNodes = append(changedNodes, node)
continue
}
if !route.Enabled {
continue
}
if route.IsPrimary {
if route.Node.IsOnline() {
continue
}
// node offline, find a new primary
log.Info().
Str("node", route.Node.Hostname).
Str("prefix", netip.Prefix(route.Prefix).String()).
Msgf("node offline, finding a new primary subnet")
// find a new primary route
var newPrimaryRoutes types.Routes
err := hsdb.db.
Preload("Node").
Where("prefix = ? AND node_id != ? AND advertised = ? AND enabled = ?",
route.Prefix,
route.NodeID,
true, true).
Find(&newPrimaryRoutes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().Err(err).Msg("error finding new primary route")
return err
}
var newPrimaryRoute *types.Route
for pos, r := range newPrimaryRoutes {
if r.Node.IsOnline() {
newPrimaryRoute = &newPrimaryRoutes[pos]
break
}
}
if newPrimaryRoute == nil {
log.Warn().
Str("node", route.Node.Hostname).
Str("prefix", netip.Prefix(route.Prefix).String()).
Msgf("no alternative primary route found")
continue
}
log.Info().
Str("old_node", route.Node.Hostname).
Str("prefix", netip.Prefix(route.Prefix).String()).
Str("new_node", newPrimaryRoute.Node.Hostname).
Msgf("found new primary route")
// disable the old primary route
routes[pos].IsPrimary = false
err = hsdb.db.Save(&routes[pos]).Error
if err != nil {
log.Error().Err(err).Msg("error disabling old primary route")
return err
}
// enable the new primary route
newPrimaryRoute.IsPrimary = true
err = hsdb.db.Save(&newPrimaryRoute).Error
if err != nil {
log.Error().Err(err).Msg("error enabling new primary route")
return err
}
changedNodes = append(changedNodes, node)
if hsdb.notifier.IsConnected(route.Node.MachineKey) {
newPrimary = &routes[idx]
break
}
}
if len(changedNodes) > 0 {
hsdb.notifier.NotifyAll(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: changedNodes,
})
// If a new route was not found/available,
// return without an error.
// We do not want to update the database as
// the one currently marked as primary is the
// best we got.
if newPrimary == nil {
return nil, nil
}
return nil
log.Trace().
Str("hostname", newPrimary.Node.Hostname).
Msg("found new primary, updating db")
// Remove primary from the old route
r.IsPrimary = false
err = hsdb.db.Save(&r).Error
if err != nil {
log.Error().Err(err).Msg("error disabling new primary route")
return nil, err
}
log.Trace().
Str("hostname", newPrimary.Node.Hostname).
Msg("removed primary from old route")
// Set primary for the new primary
newPrimary.IsPrimary = true
err = hsdb.db.Save(&newPrimary).Error
if err != nil {
log.Error().Err(err).Msg("error enabling new primary route")
return nil, err
}
log.Trace().
Str("hostname", newPrimary.Node.Hostname).
Msg("set primary to new route")
// Return a list of the machinekeys of the changed nodes.
return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil
}
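A minimal sketch of how a caller might drive failoverRoute when a route is disabled or its node drops offline; the hsdb/notifier wiring and the StateUpdate payload are simplified assumptions, not code from this diff:

// Hypothetical caller: fail over a route, then notify the two affected nodes.
changedKeys, err := hsdb.failoverRoute(route)
if err != nil {
	return err
}
// changedKeys holds the machine keys of the old and the new primary node.
for _, mkey := range changedKeys {
	hsdb.notifier.NotifyByMachineKey(
		types.StateUpdate{Type: types.StatePeerChanged},
		mkey,
	)
}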
// EnableAutoApprovedRoutes enables any routes advertised by a node that match the ACL autoApprovers policy.
@@ -456,13 +643,19 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
aclPolicy *policy.ACLPolicy,
node *types.Node,
) error {
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
if len(aclPolicy.AutoApprovers.ExitNode) == 0 && len(aclPolicy.AutoApprovers.Routes) == 0 {
// No autoapprovers configured
return nil
}
if len(node.IPAddresses) == 0 {
return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
// This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
return nil
}
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
routes, err := hsdb.getNodeAdvertisedRoutes(node)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().
@@ -474,6 +667,8 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
return err
}
log.Trace().Interface("routes", routes).Msg("routes for autoapproving")
approvedRoutes := types.Routes{}
for _, advertisedRoute := range routes {
@@ -493,6 +688,13 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
return err
}
log.Trace().
Str("node", node.Hostname).
Str("user", node.User.Name).
Strs("routeApprovers", routeApprovers).
Str("prefix", netip.Prefix(advertisedRoute.Prefix).String()).
Msg("looking up route for autoapproving")
for _, approvedAlias := range routeApprovers {
if approvedAlias == node.User.Name {
approvedRoutes = append(approvedRoutes, advertisedRoute)

View File

@@ -2,12 +2,19 @@ package db
import (
"net/netip"
"os"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"gopkg.in/check.v1"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
func (s *Suite) TestGetRoutes(c *check.C) {
@@ -29,19 +36,17 @@ func (s *Suite) TestGetRoutes(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_get_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
db.db.Save(&node)
err = db.SaveNodeRoutes(&node)
su, err := db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil)
c.Assert(su, check.Equals, false)
advertisedRoutes, err := db.GetAdvertisedRoutes(&node)
c.Assert(err, check.IsNil)
@@ -80,19 +85,17 @@ func (s *Suite) TestGetEnableRoutes(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
db.db.Save(&node)
err = db.SaveNodeRoutes(&node)
sendUpdate, err := db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
availableRoutes, err := db.GetAdvertisedRoutes(&node)
c.Assert(err, check.IsNil)
@@ -154,19 +157,17 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
}
node1 := types.Node{
ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo1),
Hostinfo: &hostInfo1,
}
db.db.Save(&node1)
err = db.SaveNodeRoutes(&node1)
sendUpdate, err := db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node1, route.String())
c.Assert(err, check.IsNil)
@@ -179,19 +180,17 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
}
node2 := types.Node{
ID: 2,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo2),
Hostinfo: &hostInfo2,
}
db.db.Save(&node2)
err = db.SaveNodeRoutes(&node2)
sendUpdate, err = db.SaveNodeRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node2, route2.String())
c.Assert(err, check.IsNil)
@@ -213,148 +212,6 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
c.Assert(len(routes), check.Equals, 0)
}
func (s *Suite) TestSubnetFailover(c *check.C) {
user, err := db.CreateUser("test")
c.Assert(err, check.IsNil)
pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = db.GetNode("test", "test_enable_route_node")
c.Assert(err, check.NotNil)
prefix, err := netip.ParsePrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)
prefix2, err := netip.ParsePrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)
hostInfo1 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2},
}
now := time.Now()
node1 := types.Node{
ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo1),
LastSeen: &now,
}
db.db.Save(&node1)
err = db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node1, prefix.String())
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node1, prefix2.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err := db.GetEnabledRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 2)
route, err := db.getPrimaryRoute(prefix)
c.Assert(err, check.IsNil)
c.Assert(route.NodeID, check.Equals, node1.ID)
hostInfo2 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix2},
}
node2 := types.Node{
ID: 2,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo2),
LastSeen: &now,
}
db.db.Save(&node2)
err = db.saveNodeRoutes(&node2)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node2, prefix2.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err = db.GetEnabledRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 2)
enabledRoutes2, err := db.GetEnabledRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes2), check.Equals, 1)
routes, err := db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 0)
// let's set node1's last seen to 10 mins ago
before := now.Add(-10 * time.Minute)
node1.LastSeen = &before
err = db.db.Save(&node1).Error
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
routes, err = db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 1)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 1)
node2.HostInfo = types.HostInfo(tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2},
})
err = db.db.Save(&node2).Error
c.Assert(err, check.IsNil)
err = db.SaveNodeRoutes(&node2)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node2, prefix.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
routes, err = db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 0)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
}
func (s *Suite) TestDeleteRoutes(c *check.C) {
user, err := db.CreateUser("test")
c.Assert(err, check.IsNil)
@@ -382,20 +239,18 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
now := time.Now()
node1 := types.Node{
ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo1),
Hostinfo: &hostInfo1,
LastSeen: &now,
}
db.db.Save(&node1)
err = db.SaveNodeRoutes(&node1)
sendUpdate, err := db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node1, prefix.String())
c.Assert(err, check.IsNil)
@@ -413,3 +268,420 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 1)
}
func TestFailoverRoute(t *testing.T) {
ipp := func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) }
// TODO(kradalby): Count/verify updates
var sink chan types.StateUpdate
go func() {
for range sink {
}
}()
machineKeys := []key.MachinePublic{
key.NewMachine().Public(),
key.NewMachine().Public(),
key.NewMachine().Public(),
key.NewMachine().Public(),
}
tests := []struct {
name string
failingRoute types.Route
routes types.Routes
want []key.MachinePublic
wantErr bool
}{
{
name: "no-route",
failingRoute: types.Route{},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "no-prime",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "exit-node",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("0.0.0.0/0"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "no-failover-single-route",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: true,
},
},
want: []key.MachinePublic{
machineKeys[0],
machineKeys[1],
},
wantErr: false,
},
{
name: "failover-none-primary",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: true,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary-multi-route",
failingRoute: types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
Enabled: true,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
ID: 3,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[2],
},
IsPrimary: false,
Enabled: true,
},
},
want: []key.MachinePublic{
machineKeys[1],
machineKeys[0],
},
wantErr: false,
},
{
name: "failover-primary-no-online",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// Offline
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[3],
},
IsPrimary: false,
Enabled: true,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary-one-not-online",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// Offline
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[3],
},
IsPrimary: false,
Enabled: true,
},
types.Route{
Model: gorm.Model{
ID: 3,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
},
want: []key.MachinePublic{
machineKeys[0],
machineKeys[1],
},
wantErr: false,
},
{
name: "failover-primary-none-enabled",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// not enabled
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: false,
},
},
want: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "failover-db-test")
assert.NoError(t, err)
notif := notifier.NewNotifier()
db, err = NewHeadscaleDatabase(
"sqlite3",
tmpDir+"/headscale_test.db",
false,
notif,
[]netip.Prefix{
netip.MustParsePrefix("10.27.0.0/23"),
},
"",
)
assert.NoError(t, err)
// Pretend that all the nodes are connected to control
for idx, key := range machineKeys {
// Pretend one node is offline
if idx == 3 {
continue
}
notif.AddNode(key, sink)
}
for _, route := range tt.routes {
if err := db.db.Save(&route).Error; err != nil {
t.Fatalf("failed to create route: %s", err)
}
}
got, err := db.failoverRoute(&tt.failingRoute)
if (err != nil) != tt.wantErr {
t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr)
return
}
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("failoverRoute() unexpected result (-want +got):\n%s", diff)
}
})
}
}
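The table above can be run in isolation with the standard Go tooling (package path assumed from the imports):

go test -v -run TestFailoverRoute ./hscontrol/db/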

View File

@@ -1,6 +1,7 @@
package db
import (
"log"
"net/netip"
"os"
"testing"
@@ -27,19 +28,22 @@ func (s *Suite) SetUpTest(c *check.C) {
}
func (s *Suite) TearDownTest(c *check.C) {
os.RemoveAll(tmpDir)
// os.RemoveAll(tmpDir)
}
func (s *Suite) ResetDB(c *check.C) {
if len(tmpDir) != 0 {
os.RemoveAll(tmpDir)
}
// if len(tmpDir) != 0 {
// os.RemoveAll(tmpDir)
// }
var err error
tmpDir, err = os.MkdirTemp("", "autoygg-client-test")
tmpDir, err = os.MkdirTemp("", "headscale-db-test-*")
if err != nil {
c.Fatal(err)
}
log.Printf("database path: %s", tmpDir+"/headscale_test.db")
db, err = NewHeadscaleDatabase(
"sqlite3",
tmpDir+"/headscale_test.db",

View File

@@ -48,9 +48,6 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -103,9 +100,6 @@ func (s *Suite) TestSetMachineUser(c *check.C) {
node := types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode",
UserID: oldUser.ID,
RegisterMethod: util.RegisterMethodAuthKey,

View File

@@ -13,6 +13,7 @@ import (
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/derp"
"tailscale.com/net/stun"
@@ -39,7 +40,7 @@ func NewDERPServer(
cfg *types.DERPConfig,
) (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server")
server := derp.NewServer(derpKey, log.Debug().Msgf) // nolint // zerolinter complains
server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
return &DERPServer{
serverURL: serverURL,
@@ -83,6 +84,8 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
RegionID: d.cfg.ServerRegionID,
HostName: host,
DERPPort: port,
IPv4: d.cfg.IPv4,
IPv6: d.cfg.IPv6,
},
},
}
@@ -98,6 +101,7 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
localDERPregion.Nodes[0].STUNPort = portSTUN
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0])
return localDERPregion, nil
}
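With the new IPv4/IPv6 fields, the embedded DERP node can pin explicit addresses from config instead of relying on DNS for HostName. A sketch of the resulting entry; the Name value is illustrative:

// Sketch: DERP map entry with the optional address overrides.
node := &tailcfg.DERPNode{
	Name:     "1", // hypothetical
	RegionID: d.cfg.ServerRegionID,
	HostName: host,
	DERPPort: port,
	IPv4:     d.cfg.IPv4, // if empty, clients resolve HostName instead
	IPv6:     d.cfg.IPv6,
}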
@@ -207,6 +211,7 @@ func DERPProbeHandler(
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
// They have a cache, but not clear if that is really necessary at Headscale, uh, scale.
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
// Coordination server is included automatically, since local DERP is using the same DNS Name in d.serverURL
func DERPBootstrapDNSHandler(
derpMap *tailcfg.DERPMap,
) func(http.ResponseWriter, *http.Request) {

View File

@@ -172,12 +172,18 @@ func (api headscaleV1APIServer) RegisterNode(
) (*v1.RegisterNodeResponse, error) {
log.Trace().
Str("user", request.GetUser()).
Str("node_key", request.GetKey()).
Str("machine_key", request.GetKey()).
Msg("Registering node")
var mkey key.MachinePublic
err := mkey.UnmarshalText([]byte(request.GetKey()))
if err != nil {
return nil, err
}
node, err := api.h.db.RegisterNodeFromAuthCallback(
api.h.registrationCache,
request.GetKey(),
mkey,
request.GetUser(),
nil,
util.RegisterMethodCLI,
@@ -198,7 +204,13 @@ func (api headscaleV1APIServer) GetNode(
return nil, err
}
return &v1.GetNodeResponse{Node: node.Proto()}, nil
resp := node.Proto()
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
return &v1.GetNodeResponse{Node: resp}, nil
}
func (api headscaleV1APIServer) SetTags(
@@ -327,7 +339,13 @@ func (api headscaleV1APIServer) ListNodes(
response := make([]*v1.Node, len(nodes))
for index, node := range nodes {
response[index] = node.Proto()
resp := node.Proto()
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
response[index] = resp
}
return &v1.ListNodesResponse{Nodes: response}, nil
@@ -340,13 +358,18 @@ func (api headscaleV1APIServer) ListNodes(
response := make([]*v1.Node, len(nodes))
for index, node := range nodes {
m := node.Proto()
resp := node.Proto()
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
validTags, invalidTags := api.h.ACLPolicy.TagsOfNode(
&node,
)
m.InvalidTags = invalidTags
m.ValidTags = validTags
response[index] = m
resp.InvalidTags = invalidTags
resp.ValidTags = validTags
response[index] = resp
}
return &v1.ListNodesResponse{Nodes: response}, nil
@@ -521,13 +544,22 @@ func (api headscaleV1APIServer) DebugCreateNode(
Hostname: "DebugTestNode",
}
givenName, err := api.h.db.GenerateGivenName(request.GetKey(), request.GetName())
var mkey key.MachinePublic
err = mkey.UnmarshalText([]byte(request.GetKey()))
if err != nil {
return nil, err
}
givenName, err := api.h.db.GenerateGivenName(mkey, request.GetName())
if err != nil {
return nil, err
}
nodeKey := key.NewNode()
newNode := types.Node{
MachineKey: request.GetKey(),
MachineKey: mkey,
NodeKey: nodeKey.Public(),
Hostname: request.GetName(),
GivenName: givenName,
User: *user,
@@ -535,17 +567,15 @@ func (api headscaleV1APIServer) DebugCreateNode(
Expiry: &time.Time{},
LastSeen: &time.Time{},
HostInfo: types.HostInfo(hostinfo),
Hostinfo: &hostinfo,
}
nodeKey := key.NodePublic{}
err = nodeKey.UnmarshalText([]byte(request.GetKey()))
if err != nil {
log.Panic().Msg("can not add node for debug. invalid node key")
}
log.Debug().
Str("machine_key", mkey.ShortString()).
Msg("adding debug machine via CLI, appending to registration cache")
api.h.registrationCache.Set(
util.NodePublicKeyStripPrefix(nodeKey),
mkey.String(),
newNode,
registerCacheExpiration,
)

View File

@@ -1,15 +0,0 @@
//go:build ts2019
package hscontrol
import (
"net/http"
"github.com/gorilla/mux"
)
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
}

View File

@@ -1,8 +0,0 @@
//go:build !ts2019
package hscontrol
import "github.com/gorilla/mux"
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
}
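Both build-tag variants above are removed in this change; until now the legacy TS2019 handlers were only compiled in when building with the tag:

go build -tags ts2019 ./...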

View File

@@ -4,13 +4,13 @@ import (
"bytes"
"encoding/json"
"errors"
"fmt"
"html/template"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
@@ -36,6 +36,22 @@ const (
var ErrRegisterMethodCLIDoesNotSupportExpire = errors.New(
"machines registered with CLI does not support expire",
)
var ErrNoCapabilityVersion = errors.New("no capability version set")
func parseCabailityVersion(req *http.Request) (tailcfg.CapabilityVersion, error) {
clientCapabilityStr := req.URL.Query().Get("v")
if clientCapabilityStr == "" {
return 0, ErrNoCapabilityVersion
}
clientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr)
if err != nil {
return 0, fmt.Errorf("failed to parse capability version: %w", err)
}
return tailcfg.CapabilityVersion(clientCapabilityVersion), nil
}
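A quick illustration of the helper using net/http/httptest (not part of this diff):

// Sketch: extracting the capability version from the 'v' query parameter.
req := httptest.NewRequest(http.MethodGet, "/key?v=74", nil)
capVer, err := parseCabailityVersion(req)
// capVer == tailcfg.CapabilityVersion(74), err == nil.
// A request without ?v= yields ErrNoCapabilityVersion instead.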
// KeyHandler provides the Headscale pub key
// Listens in /key.
@@ -44,59 +60,58 @@ func (h *Headscale) KeyHandler(
req *http.Request,
) {
// New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion
clientCapabilityStr := req.URL.Query().Get("v")
if clientCapabilityStr != "" {
log.Debug().
Str("handler", "/key").
Str("v", clientCapabilityStr).
Msg("New noise client")
clientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr)
if err != nil {
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("Wrong params"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// TS2021 (Tailscale v2 protocol) requires to have a different key
if clientCapabilityVersion >= NoiseCapabilityVersion {
resp := tailcfg.OverTLSPublicKeyResponse{
LegacyPublicKey: h.privateKey2019.Public(),
PublicKey: h.noisePrivateKey.Public(),
}
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
err = json.NewEncoder(writer).Encode(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
}
log.Debug().
Str("handler", "/key").
Msg("New legacy client")
// Old clients don't send a 'v' parameter, so we send the legacy public key
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusOK)
_, err := writer.Write([]byte(util.MachinePublicKeyStripPrefix(h.privateKey2019.Public())))
capVer, err := parseCabailityVersion(req)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
Msg("could not get capability version")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusInternalServerError)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
log.Debug().
Str("handler", "/key").
Int("cap_ver", int(capVer)).
Msg("New noise client")
if err != nil {
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("Wrong params"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// TS2021 (Tailscale v2 protocol) requires to have a different key
if capVer >= NoiseCapabilityVersion {
resp := tailcfg.OverTLSPublicKeyResponse{
PublicKey: h.noisePrivateKey.Public(),
}
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
err = json.NewEncoder(writer).Encode(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
}
@@ -169,33 +184,16 @@ func (h *Headscale) RegisterWebAPI(
req *http.Request,
) {
vars := mux.Vars(req)
nodeKeyStr, ok := vars["nkey"]
if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
machineKeyStr := vars["mkey"]
// We need to make sure we dont open for XSS style injections, if the parameter that
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error.
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText(
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(machineKeyStr),
)
if !ok || nodeKeyStr == "" || err != nil {
if err != nil {
log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -213,7 +211,7 @@ func (h *Headscale) RegisterWebAPI(
var content bytes.Buffer
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
Key: nodeKeyStr,
Key: machineKey.String(),
}); err != nil {
log.Error().
Str("func", "RegisterWebAPI").

View File

@@ -8,6 +8,7 @@ import (
"net/url"
"os"
"path"
"slices"
"sort"
"strings"
"sync"
@@ -20,12 +21,11 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/klauspost/compress/zstd"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"golang.org/x/exp/maps"
"tailscale.com/envknob"
"tailscale.com/smallzstd"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
)
const (
@@ -46,11 +46,9 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
// - Keep information about the previous mapresponse so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// - some sort of batching, wait for 5 or 60 seconds before sending
type Mapper struct {
privateKey2019 *key.MachinePrivate
isNoise bool
// Configuration
// TODO(kradalby): figure out if this is the format we want this in
derpMap *tailcfg.DERPMap
@@ -65,15 +63,19 @@ type Mapper struct {
// Map isn't concurrency safe, so we need to ensure
// only one func is accessing it over time.
mu sync.Mutex
peers map[uint64]*types.Node
mu sync.Mutex
peers map[uint64]*types.Node
patches map[uint64][]patch
}
type patch struct {
timestamp time.Time
change *tailcfg.PeerChange
}
func NewMapper(
node *types.Node,
peers types.Nodes,
privateKey *key.MachinePrivate,
isNoise bool,
derpMap *tailcfg.DERPMap,
baseDomain string,
dnsCfg *tailcfg.DNSConfig,
@@ -82,16 +84,12 @@ func NewMapper(
) *Mapper {
log.Debug().
Caller().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("creating new mapper")
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
return &Mapper{
privateKey2019: privateKey,
isNoise: isNoise,
derpMap: derpMap,
baseDomain: baseDomain,
dnsCfg: dnsCfg,
@@ -103,7 +101,8 @@ func NewMapper(
seq: 0,
// TODO: populate
peers: peers.IDMap(),
peers: peers.IDMap(),
patches: make(map[uint64][]patch),
}
}
@@ -192,7 +191,7 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
attrs := url.Values{
"device_name": []string{node.Hostname},
"device_model": []string{node.HostInfo.OS},
"device_model": []string{node.Hostinfo.OS},
}
if len(node.IPAddresses) > 0 {
@@ -209,10 +208,11 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
func (m *Mapper) fullMapResponse(
node *types.Node,
pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
peers := nodeMapToList(m.peers)
resp, err := m.baseWithConfigMapResponse(node, pol)
resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
if err != nil {
return nil, err
}
@@ -221,10 +221,12 @@ func (m *Mapper) fullMapResponse(
resp,
pol,
node,
capVer,
peers,
peers,
m.baseDomain,
m.dnsCfg,
m.randomClientPort,
)
if err != nil {
return nil, err
@@ -242,13 +244,22 @@ func (m *Mapper) FullMapResponse(
m.mu.Lock()
defer m.mu.Unlock()
resp, err := m.fullMapResponse(node, pol)
if err != nil {
return nil, err
peers := maps.Keys(m.peers)
peersWithPatches := maps.Keys(m.patches)
slices.Sort(peers)
slices.Sort(peersWithPatches)
if len(peersWithPatches) > 0 {
log.Debug().
Str("node", node.Hostname).
Uints64("peers", peers).
Uints64("pending_patches", peersWithPatches).
Msgf("node requested full map response, but has pending patches")
}
if m.isNoise {
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
if err != nil {
return nil, err
}
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
@@ -262,15 +273,23 @@ func (m *Mapper) LiteMapResponse(
node *types.Node,
pol *policy.ACLPolicy,
) ([]byte, error) {
resp, err := m.baseWithConfigMapResponse(node, pol)
resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version)
if err != nil {
return nil, err
}
if m.isNoise {
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
pol,
node,
nodeMapToList(m.peers),
)
if err != nil {
return nil, err
}
resp.PacketFilter = policy.ReduceFilterRules(node, rules)
resp.SSHPolicy = sshPolicy
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
@@ -287,10 +306,12 @@ func (m *Mapper) KeepAliveResponse(
func (m *Mapper) DERPMapResponse(
mapRequest tailcfg.MapRequest,
node *types.Node,
derpMap tailcfg.DERPMap,
derpMap *tailcfg.DERPMap,
) ([]byte, error) {
m.derpMap = derpMap
resp := m.baseMapResponse()
resp.DERPMap = &derpMap
resp.DERPMap = derpMap
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
@@ -300,18 +321,29 @@ func (m *Mapper) PeerChangedResponse(
node *types.Node,
changed types.Nodes,
pol *policy.ACLPolicy,
messages ...string,
) ([]byte, error) {
m.mu.Lock()
defer m.mu.Unlock()
lastSeen := make(map[tailcfg.NodeID]bool)
// Update our internal map.
for _, node := range changed {
m.peers[node.ID] = node
if patches, ok := m.patches[node.ID]; ok {
// preserve online status in case the patch has an outdated one
online := node.IsOnline
// We have just seen the node, let the peers update their list.
lastSeen[tailcfg.NodeID(node.ID)] = true
for _, p := range patches {
// TODO(kradalby): Figure if this needs to be sorted by timestamp
node.ApplyPeerChange(p.change)
}
// Ensure the patches are not applied again later
delete(m.patches, node.ID)
node.IsOnline = online
}
m.peers[node.ID] = node
}
resp := m.baseMapResponse()
@@ -320,20 +352,66 @@ func (m *Mapper) PeerChangedResponse(
&resp,
pol,
node,
mapRequest.Version,
nodeMapToList(m.peers),
changed,
m.baseDomain,
m.dnsCfg,
m.randomClientPort,
)
if err != nil {
return nil, err
}
// resp.PeerSeenChange = lastSeen
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
}
// PeerChangedPatchResponse creates a patch MapResponse with
// incoming update from a state change.
func (m *Mapper) PeerChangedPatchResponse(
mapRequest tailcfg.MapRequest,
node *types.Node,
changed []*tailcfg.PeerChange,
pol *policy.ACLPolicy,
) ([]byte, error) {
m.mu.Lock()
defer m.mu.Unlock()
sendUpdate := false
// patch the internal map
for _, change := range changed {
if peer, ok := m.peers[uint64(change.NodeID)]; ok {
peer.ApplyPeerChange(change)
sendUpdate = true
} else {
log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)
p := patch{
timestamp: time.Now(),
change: change,
}
if patches, ok := m.patches[uint64(change.NodeID)]; ok {
patches := append(patches, p)
m.patches[uint64(change.NodeID)] = patches
} else {
m.patches[uint64(change.NodeID)] = []patch{p}
}
}
}
if !sendUpdate {
return nil, nil
}
resp := m.baseMapResponse()
resp.PeersChangedPatch = changed
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
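For context, a patch wraps a tailcfg.PeerChange; a hypothetical call site (values assumed) could look like:

// Sketch: a peer whose preferred DERP region changed, delivered as a patch.
change := &tailcfg.PeerChange{
	NodeID:     2, // ID of the changed peer
	DERPRegion: 1, // new preferred DERP region
}
data, err := m.PeerChangedPatchResponse(mapRequest, node, []*tailcfg.PeerChange{change}, pol)
// data and err are both nil if the peer was unknown and the patch was only queued.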
// TODO(kradalby): We need some integration tests for this.
func (m *Mapper) PeerRemovedResponse(
mapRequest tailcfg.MapRequest,
node *types.Node,
@@ -342,13 +420,23 @@ func (m *Mapper) PeerRemovedResponse(
m.mu.Lock()
defer m.mu.Unlock()
// Some nodes might have been removed already
// so we don't want to ask downstream to remove
// them twice, as that can cause a panic in tailscaled.
notYetRemoved := []tailcfg.NodeID{}
// remove from our internal map
for _, id := range removed {
if _, ok := m.peers[uint64(id)]; ok {
notYetRemoved = append(notYetRemoved, id)
}
delete(m.peers, uint64(id))
delete(m.patches, uint64(id))
}
resp := m.baseMapResponse()
resp.PeersRemoved = removed
resp.PeersRemoved = notYetRemoved
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
@@ -358,20 +446,10 @@ func (m *Mapper) marshalMapResponse(
resp *tailcfg.MapResponse,
node *types.Node,
compression string,
messages ...string,
) ([]byte, error) {
atomic.AddUint64(&m.seq, 1)
var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot parse client key")
return nil, err
}
jsonBody, err := json.Marshal(resp)
if err != nil {
log.Error().
@@ -382,11 +460,27 @@ func (m *Mapper) marshalMapResponse(
if debugDumpMapResponsePath != "" {
data := map[string]interface{}{
"Messages": messages,
"MapRequest": mapRequest,
"MapResponse": resp,
}
body, err := json.Marshal(data)
responseType := "keepalive"
switch {
case resp.Peers != nil && len(resp.Peers) > 0:
responseType = "full"
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
responseType = "lite"
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
responseType = "changed"
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
responseType = "patch"
case resp.PeersRemoved != nil && len(resp.PeersRemoved) > 0:
responseType = "removed"
}
body, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Error().
Caller().
@@ -405,7 +499,7 @@ func (m *Mapper) marshalMapResponse(
mapResponsePath := path.Join(
mPath,
fmt.Sprintf("%d-%s-%d.json", now, m.uid, atomic.LoadUint64(&m.seq)),
fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
)
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
@@ -418,15 +512,8 @@ func (m *Mapper) marshalMapResponse(
var respBody []byte
if compression == util.ZstdCompression {
respBody = zstdEncode(jsonBody)
if !m.isNoise { // if legacy protocol
respBody = m.privateKey2019.SealTo(machineKey, respBody)
}
} else {
if !m.isNoise { // if legacy protocol
respBody = m.privateKey2019.SealTo(machineKey, jsonBody)
} else {
respBody = jsonBody
}
respBody = jsonBody
}
data := make([]byte, reservedResponseHeaderSize)
@@ -436,32 +523,6 @@ func (m *Mapper) marshalMapResponse(
return data, nil
}
// MarshalResponse takes a Tailscale Response and marshals it to JSON.
// If isNoise is set, then the JSON body will be returned
// If !isNoise and privateKey2019 is set, the JSON body will be sealed in a Nacl box.
func MarshalResponse(
resp interface{},
isNoise bool,
privateKey2019 *key.MachinePrivate,
machineKey key.MachinePublic,
) ([]byte, error) {
jsonBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot marshal response")
return nil, err
}
if !isNoise && privateKey2019 != nil {
return privateKey2019.SealTo(machineKey, jsonBody), nil
}
return jsonBody, nil
}
func zstdEncode(in []byte) []byte {
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
if !ok {
@@ -495,6 +556,7 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
resp := tailcfg.MapResponse{
KeepAlive: false,
ControlTime: &now,
// TODO(kradalby): Implement PingRequest?
}
return resp
@@ -507,10 +569,11 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
func (m *Mapper) baseWithConfigMapResponse(
node *types.Node,
pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
resp := m.baseMapResponse()
tailnode, err := tailNode(node, pol, m.dnsCfg, m.baseDomain)
tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
if err != nil {
return nil, err
}
@@ -527,8 +590,7 @@ func (m *Mapper) baseWithConfigMapResponse(
resp.KeepAlive = false
resp.Debug = &tailcfg.Debug{
DisableLogTail: !m.logtail,
RandomizeClientPort: m.randomClientPort,
DisableLogTail: !m.logtail,
}
return &resp, nil
@@ -544,15 +606,6 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
return ret
}
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
return lo.Filter(peers, func(item *types.Node, index int) bool {
// Filter out nodes that are expired OR
// nodes that have no endpoints; this typically means they have
// registered, but are not configured.
return !item.IsExpired() || len(item.Endpoints) > 0
})
}
// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
func appendPeerChanges(
@@ -560,10 +613,12 @@ func appendPeerChanges(
pol *policy.ACLPolicy,
node *types.Node,
capVer tailcfg.CapabilityVersion,
peers types.Nodes,
changed types.Nodes,
baseDomain string,
dnsCfg *tailcfg.DNSConfig,
randomClientPort bool,
) error {
fullChange := len(peers) == len(changed)
@@ -576,9 +631,6 @@ func appendPeerChanges(
return err
}
// Filter out peers that have expired.
changed = filterExpiredAndNotReady(changed)
// If there are filter rules present, see if there are any nodes that cannot
// access eachother at all and remove them from the peers.
if len(rules) > 0 {
@@ -594,7 +646,7 @@ func appendPeerChanges(
peers,
)
tailPeers, err := tailNodes(changed, pol, dnsCfg, baseDomain)
tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort)
if err != nil {
return err
}
@@ -614,8 +666,5 @@ func appendPeerChanges(
resp.UserProfiles = profiles
resp.SSHPolicy = sshPolicy
// TODO(kradalby): This currently does not take last seen in keepalives into account
resp.OnlineChange = peers.OnlineNodeMap()
return nil
}
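The fullChange test at the top of this function decides which response field carries the rendered peers; sketched from context, since the branch itself is not shown in these hunks:

// Sketch: full map responses fill Peers, incremental ones fill PeersChanged.
if fullChange {
	resp.Peers = tailPeers
} else {
	resp.PeersChanged = tailPeers
}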

View File

@@ -166,10 +166,16 @@ func Test_fullMapResponse(t *testing.T) {
expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)
mini := &types.Node{
ID: 0,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
ID: 0,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
GivenName: "mini",
@@ -180,8 +186,7 @@ func Test_fullMapResponse(t *testing.T) {
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{
{
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
@@ -226,28 +231,32 @@ func Test_fullMapResponse(t *testing.T) {
netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"),
},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
tailcfg.NodeAttrDisableUPnP,
},
}
peer1 := &types.Node{
ID: 1,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
ID: 1,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "peer1",
GivenName: "peer1",
@@ -256,8 +265,7 @@ func Test_fullMapResponse(t *testing.T) {
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{},
CreatedAt: created,
}
@@ -278,28 +286,32 @@ func Test_fullMapResponse(t *testing.T) {
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
tailcfg.NodeAttrDisableUPnP,
},
}
peer2 := &types.Node{
ID: 2,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
ID: 2,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
GivenName: "peer2",
@@ -308,8 +320,7 @@ func Test_fullMapResponse(t *testing.T) {
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{},
CreatedAt: created,
}
@@ -387,7 +398,6 @@ func Test_fullMapResponse(t *testing.T) {
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
OnlineChange: map[tailcfg.NodeID]bool{tailPeer1.ID: false},
PacketFilter: []tailcfg.FilterRule{},
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
@@ -429,10 +439,6 @@ func Test_fullMapResponse(t *testing.T) {
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
OnlineChange: map[tailcfg.NodeID]bool{
tailPeer1.ID: false,
tailcfg.NodeID(peer2.ID): false,
},
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.2/32"},
@@ -459,8 +465,6 @@ func Test_fullMapResponse(t *testing.T) {
mappy := NewMapper(
tt.node,
tt.peers,
nil,
false,
tt.derpMap,
tt.baseDomain,
tt.dnsConfig,
@@ -471,6 +475,7 @@ func Test_fullMapResponse(t *testing.T) {
got, err := mappy.fullMapResponse(
tt.node,
tt.pol,
0,
)
if (err != nil) != tt.wantErr {

View File

@@ -15,18 +15,22 @@ import (
func tailNodes(
nodes types.Nodes,
capVer tailcfg.CapabilityVersion,
pol *policy.ACLPolicy,
dnsConfig *tailcfg.DNSConfig,
baseDomain string,
randomClientPort bool,
) ([]*tailcfg.Node, error) {
tNodes := make([]*tailcfg.Node, len(nodes))
for index, node := range nodes {
node, err := tailNode(
node,
capVer,
pol,
dnsConfig,
baseDomain,
randomClientPort,
)
if err != nil {
return nil, err
@@ -42,25 +46,12 @@ func tailNodes(
// as per the expected behaviour in the official SaaS.
func tailNode(
node *types.Node,
capVer tailcfg.CapabilityVersion,
pol *policy.ACLPolicy,
dnsConfig *tailcfg.DNSConfig,
baseDomain string,
randomClientPort bool,
) (*tailcfg.Node, error) {
nodeKey, err := node.NodePublicKey()
if err != nil {
return nil, err
}
machineKey, err := node.MachinePublicKey()
if err != nil {
return nil, err
}
discoKey, err := node.DiscoPublicKey()
if err != nil {
return nil, err
}
addrs := node.IPAddresses.Prefixes()
allowedIPs := append(
@@ -81,8 +72,8 @@ func tailNode(
}
var derp string
if node.HostInfo.NetInfo != nil {
derp = fmt.Sprintf("127.3.3.40:%d", node.HostInfo.NetInfo.PreferredDERP)
if node.Hostinfo.NetInfo != nil {
derp = fmt.Sprintf("127.3.3.40:%d", node.Hostinfo.NetInfo.PreferredDERP)
} else {
derp = "127.3.3.40:0" // Zero means disconnected or unknown.
}
@@ -96,13 +87,9 @@ func tailNode(
hostname, err := node.GetFQDN(dnsConfig, baseDomain)
if err != nil {
return nil, err
return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err)
}
hostInfo := node.GetHostInfo()
online := node.IsOnline()
tags, _ := pol.TagsOfNode(node)
tags = lo.Uniq(append(tags, node.ForcedTags...))
@@ -112,35 +99,64 @@ func tailNode(
strconv.FormatUint(node.ID, util.Base10),
), // in headscale, unlike tailcontrol server, IDs are permanent
Name: hostname,
Cap: capVer,
User: tailcfg.UserID(node.UserID),
Key: nodeKey,
Key: node.NodeKey,
KeyExpiry: keyExpiry,
Machine: machineKey,
DiscoKey: discoKey,
Machine: node.MachineKey,
DiscoKey: node.DiscoKey,
Addresses: addrs,
AllowedIPs: allowedIPs,
Endpoints: node.Endpoints,
DERP: derp,
Hostinfo: hostInfo.View(),
Hostinfo: node.Hostinfo.View(),
Created: node.CreatedAt,
Online: node.IsOnline,
Tags: tags,
PrimaryRoutes: primaryPrefixes,
LastSeen: node.LastSeen,
Online: &online,
KeepAlive: true,
MachineAuthorized: !node.IsExpired(),
Expired: node.IsExpired(),
}
Capabilities: []string{
// - 74: 2023-09-18: Client understands NodeCapMap
if capVer >= 74 {
tNode.CapMap = tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
}
if randomClientPort {
tNode.CapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{}
}
} else {
tNode.Capabilities = []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
}
if randomClientPort {
tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrRandomizeClientPort)
}
}
// - 72: 2023-08-23: TS-2023-006 UPnP issue fixed; UPnP can now be used again
if capVer < 72 {
tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrDisableUPnP)
}
if node.IsOnline == nil || !*node.IsOnline {
// LastSeen is only set when node is
// not connected to the control server.
tNode.LastSeen = node.LastSeen
}
return &tNode, nil
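The capability gate above means the same node serializes differently per client; a sketch of both paths, with the call signature taken from this diff:

// Sketch: clients at capVer >= 74 get CapMap, older clients get Capabilities.
legacy, _ := tailNode(node, 68, pol, dnsConfig, baseDomain, false) // Capabilities slice
modern, _ := tailNode(node, 74, pol, dnsConfig, baseDomain, false) // CapMap populated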

View File

@@ -53,21 +53,42 @@ func TestTailNode(t *testing.T) {
wantErr bool
}{
{
name: "empty-node",
node: &types.Node{},
name: "empty-node",
node: &types.Node{
Hostinfo: &tailcfg.Hostinfo{},
},
pol: &policy.ACLPolicy{},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "",
want: nil,
wantErr: true,
want: &tailcfg.Node{
StableID: "0",
Addresses: []netip.Prefix{},
AllowedIPs: []netip.Prefix{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
MachineAuthorized: true,
Capabilities: []tailcfg.NodeCapability{
"https://tailscale.com/cap/file-sharing", "https://tailscale.com/cap/is-admin",
"https://tailscale.com/cap/ssh", "debug-disable-upnp",
},
},
wantErr: false,
},
{
name: "minimal-node",
node: &types.Node{
ID: 0,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
ID: 0,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{
netip.MustParseAddr("100.64.0.1"),
},
@@ -82,8 +103,7 @@ func TestTailNode(t *testing.T) {
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{
{
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
@@ -133,10 +153,9 @@ func TestTailNode(t *testing.T) {
netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"),
},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
@@ -145,14 +164,13 @@ func TestTailNode(t *testing.T) {
},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
tailcfg.NodeAttrDisableUPnP,
},
},
wantErr: false,
@@ -166,9 +184,11 @@ func TestTailNode(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got, err := tailNode(
tt.node,
0,
tt.pol,
tt.dnsConfig,
tt.baseDomain,
false,
)
if (err != nil) != tt.wantErr {

View File

@@ -1,11 +1,14 @@
package notifier
import (
"fmt"
"strings"
"sync"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/types/key"
)
type Notifier struct {
@@ -17,9 +20,9 @@ func NewNotifier() *Notifier {
return &Notifier{}
}
func (n *Notifier) AddNode(machineKey string, c chan<- types.StateUpdate) {
log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to add node")
defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to add node")
func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) {
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node")
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to add node")
n.l.Lock()
defer n.l.Unlock()
@@ -28,17 +31,17 @@ func (n *Notifier) AddNode(machineKey string, c chan<- types.StateUpdate) {
n.nodes = make(map[string]chan<- types.StateUpdate)
}
n.nodes[machineKey] = c
n.nodes[machineKey.String()] = c
log.Trace().
Str("machine_key", machineKey).
Str("machine_key", machineKey.ShortString()).
Int("open_chans", len(n.nodes)).
Msg("Added new channel")
}
func (n *Notifier) RemoveNode(machineKey string) {
log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to remove node")
defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to remove node")
func (n *Notifier) RemoveNode(machineKey key.MachinePublic) {
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node")
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to remove node")
n.l.Lock()
defer n.l.Unlock()
@@ -47,14 +50,27 @@ func (n *Notifier) RemoveNode(machineKey string) {
return
}
delete(n.nodes, machineKey)
delete(n.nodes, machineKey.String())
log.Trace().
Str("machine_key", machineKey).
Str("machine_key", machineKey.ShortString()).
Int("open_chans", len(n.nodes)).
Msg("Removed channel")
}
// IsConnected reports if a node is connected to headscale and has a
// poll session open.
func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool {
n.l.RLock()
defer n.l.RUnlock()
if _, ok := n.nodes[machineKey.String()]; ok {
return true
}
return false
}
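IsConnected works because each poll session registers a channel and removes it on teardown; a hypothetical session lifecycle:

// Sketch: a long-poll handler registering with the notifier.
updateChan := make(chan types.StateUpdate)
notif.AddNode(node.MachineKey, updateChan)
defer notif.RemoveNode(node.MachineKey)
for update := range updateChan {
	_ = update // render and stream the update to the client
}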
func (n *Notifier) NotifyAll(update types.StateUpdate) {
n.NotifyWithIgnore(update)
}
@@ -78,3 +94,31 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string)
c <- update
}
}
func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) {
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
defer log.Trace().
Caller().
Interface("type", update.Type).
Msg("releasing lock, finished notifing")
n.l.RLock()
defer n.l.RUnlock()
if c, ok := n.nodes[mKey.String()]; ok {
c <- update
}
}
func (n *Notifier) String() string {
n.l.RLock()
defer n.l.RUnlock()
str := []string{"Notifier, in map:\n"}
for k, v := range n.nodes {
str = append(str, fmt.Sprintf("\t%s: %v\n", k, v))
}
return strings.Join(str, "")
}

View File

@@ -90,42 +90,28 @@ func (h *Headscale) determineTokenExpiration(idTokenExpiration time.Time) time.T
// RegisterOIDC redirects to the OIDC provider for authentication
// Puts NodeKey in cache so the callback can retrieve it using the oidc state param
// Listens in /oidc/register/:nKey.
// Listens in /oidc/register/:mKey.
func (h *Headscale) RegisterOIDC(
writer http.ResponseWriter,
req *http.Request,
) {
vars := mux.Vars(req)
nodeKeyStr, ok := vars["nkey"]
machineKeyStr, ok := vars["mkey"]
log.Debug().
Caller().
Str("node_key", nodeKeyStr).
Str("machine_key", machineKeyStr).
Bool("ok", ok).
Msg("Received oidc register call")
if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
util.LogErr(err, "Failed to write response")
}
return
}
// We need to make sure we dont open for XSS style injections, if the parameter that
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error.
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText(
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(machineKeyStr),
)
if !ok || nodeKeyStr == "" || err != nil {
if err != nil {
log.Warn().
Err(err).
Msg("Failed to parse incoming nodekey in OIDC registration")
@@ -154,7 +140,7 @@ func (h *Headscale) RegisterOIDC(
// place the node key into the state cache, so it can be retrieved later
h.registrationCache.Set(
stateStr,
util.NodePublicKeyStripPrefix(nodeKey),
machineKey,
registerCacheExpiration,
)
@@ -232,7 +218,7 @@ func (h *Headscale) OIDCCallback(
return
}
nodeKey, nodeExists, err := h.validateNodeForOIDCCallback(
machineKey, nodeExists, err := h.validateNodeForOIDCCallback(
writer,
state,
claims,
@@ -255,7 +241,7 @@ func (h *Headscale) OIDCCallback(
return
}
if err := h.registerNodeForOIDCCallback(writer, user, nodeKey, idTokenExpiry); err != nil {
if err := h.registerNodeForOIDCCallback(writer, user, machineKey, idTokenExpiry); err != nil {
return
}
@@ -462,10 +448,10 @@ func (h *Headscale) validateNodeForOIDCCallback(
state string,
claims *IDTokenClaims,
expiry time.Time,
) (*key.NodePublic, bool, error) {
) (*key.MachinePublic, bool, error) {
// retrieve nodekey from state cache
nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
if !nodeKeyFound {
machineKeyIf, machineKeyFound := h.registrationCache.Get(state)
if !machineKeyFound {
log.Trace().
Msg("requested node state key expired before authorisation completed")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -478,11 +464,12 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, false, errOIDCNodeKeyMissing
}
var nodeKey key.NodePublic
nodeKeyFromCache, nodeKeyOK := nodeKeyIf.(string)
if !nodeKeyOK {
var machineKey key.MachinePublic
machineKey, machineKeyOK := machineKeyIf.(key.MachinePublic)
if !machineKeyOK {
log.Trace().
Msg("requested node state key is not a string")
Interface("got", machineKeyIf).
Msg("requested node state key is not a nodekey")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("state is invalid"))
@@ -493,29 +480,11 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, false, errOIDCInvalidNodeState
}
err := nodeKey.UnmarshalText(
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyFromCache)),
)
if err != nil {
log.Error().
Str("nodeKey", nodeKeyFromCache).
Bool("nodeKeyOK", nodeKeyOK).
Msg("could not parse node public key")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, werr := writer.Write([]byte("could not parse node public key"))
if werr != nil {
util.LogErr(err, "Failed to write response")
}
return nil, false, err
}
// retrieve node information if it exists.
// The error is not important, because if it does not
// exist, then this is a new node and we will move
// on to registration.
node, _ := h.db.GetNodeByNodeKey(nodeKey)
node, _ := h.db.GetNodeByMachineKey(machineKey)
if node != nil {
log.Trace().
@@ -570,7 +539,7 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, true, nil
}
return &nodeKey, false, nil
return &machineKey, false, nil
}
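
The net effect of this hunk is that the registration cache now stores key.MachinePublic values directly, so the callback recovers the key with a type assertion instead of re-parsing a string. A rough stand-in, using a plain map where headscale uses a TTL cache:

package main

import (
	"fmt"

	"tailscale.com/types/key"
)

func main() {
	// Stand-in for headscale's registration cache (the real one
	// is a TTL cache; a plain map is enough to show the shape).
	cache := map[string]any{}

	mKey := key.NewMachine().Public()
	cache["state123"] = mKey // store the typed key, not a string

	got, ok := cache["state123"]
	if !ok {
		fmt.Println("state expired")
		return
	}

	// The type assertion replaces the old UnmarshalText round-trip.
	machineKey, ok := got.(key.MachinePublic)
	if !ok {
		fmt.Printf("state is invalid, got %T\n", got)
		return
	}
	fmt.Println("registering node", machineKey.ShortString())
}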
func getUserName(
@@ -641,13 +610,13 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
func (h *Headscale) registerNodeForOIDCCallback(
writer http.ResponseWriter,
user *types.User,
nodeKey *key.NodePublic,
machineKey *key.MachinePublic,
expiry time.Time,
) error {
if _, err := h.db.RegisterNodeFromAuthCallback(
// TODO(kradalby): find a better way to use the cache across modules
h.registrationCache,
nodeKey.String(),
*machineKey,
user.Name,
&expiry,
util.RegisterMethodOIDC,

View File

@@ -250,6 +250,21 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
if node.IPAddresses.InIPSet(expanded) {
dests = append(dests, dest)
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo != nil {
// TODO(kradalby): Evaluate if we should only keep
// the routes if the route is enabled. This will
// require database access in this part of the code.
if len(node.Hostinfo.RoutableIPs) > 0 {
for _, routableIP := range node.Hostinfo.RoutableIPs {
if expanded.ContainsPrefix(routableIP) {
dests = append(dests, dest)
}
}
}
}
}
if len(dests) > 0 {
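
The preservation check hinges on IPSet.ContainsPrefix: a destination is kept whenever the expanded destination set fully covers one of the node's advertised prefixes, even if none of the node's own IPs are in the set. A standalone illustration, assuming go4.org/netipx (the IPSet implementation Tailscale uses):

package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	// A destination from the ACL, e.g. "10.33.0.0/16:*",
	// expanded into an IP set.
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("10.33.0.0/16"))
	expanded, err := b.IPSet()
	if err != nil {
		panic(err)
	}

	// Routes advertised by a subnet router (Hostinfo.RoutableIPs).
	routableIPs := []netip.Prefix{
		netip.MustParsePrefix("10.33.0.0/16"),
		netip.MustParsePrefix("192.168.1.0/24"),
	}

	for _, p := range routableIPs {
		// Keep the destination if the expanded set covers the
		// route, even though the router's own node IP is not in it.
		fmt.Printf("%s covered: %v\n", p, expanded.ContainsPrefix(p))
	}
}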
@@ -596,10 +611,13 @@ func excludeCorrectlyTaggedNodes(
}
// for each node if tag is in tags list, don't append it.
for _, node := range nodes {
hi := node.GetHostInfo()
found := false
for _, t := range hi.RequestTags {
if node.Hostinfo == nil {
continue
}
for _, t := range node.Hostinfo.RequestTags {
if util.StringOrPrefixListContains(tags, t) {
found = true
@@ -671,14 +689,18 @@ func expandOwnersFromTag(
pol *ACLPolicy,
tag string,
) ([]string, error) {
noTagErr := fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
ErrInvalidTag,
tag,
)
if pol == nil {
return []string{}, noTagErr
}
var owners []string
ows, ok := pol.TagOwners[tag]
if !ok {
return []string{}, fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
ErrInvalidTag,
tag,
)
return []string{}, noTagErr
}
for _, owner := range ows {
if isGroup(owner) {
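
This refactor hoists the wrapped sentinel error so the nil-policy and missing-owner paths return the same value; callers such as TagsOfNode (below) then match it with errors.Is. A minimal sketch of the wrapping idiom, with a simplified signature:

package main

import (
	"errors"
	"fmt"
)

var ErrInvalidTag = errors.New("invalid tag")

func expandOwners(tagOwners map[string][]string, tag string) ([]string, error) {
	// Wrap the sentinel once; every failure path reuses it.
	noTagErr := fmt.Errorf("%w. %v isn't owned by a TagOwner", ErrInvalidTag, tag)
	if tagOwners == nil {
		return nil, noTagErr
	}
	ows, ok := tagOwners[tag]
	if !ok {
		return nil, noTagErr
	}
	return ows, nil
}

func main() {
	_, err := expandOwners(nil, "tag:hr-webserver")
	// errors.Is unwraps the %w chain back to the sentinel.
	fmt.Println(errors.Is(err, ErrInvalidTag)) // true
}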
@@ -787,8 +809,11 @@ func (pol *ACLPolicy) expandIPsFromTag(
for _, user := range owners {
nodes := filterNodesByUser(nodes, user)
for _, node := range nodes {
hi := node.GetHostInfo()
if util.StringOrPrefixListContains(hi.RequestTags, alias) {
if node.Hostinfo == nil {
continue
}
if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) {
node.IPAddresses.AppendToIPSet(&build)
}
}
@@ -882,7 +907,7 @@ func (pol *ACLPolicy) TagsOfNode(
validTagMap := make(map[string]bool)
invalidTagMap := make(map[string]bool)
for _, tag := range node.HostInfo.RequestTags {
for _, tag := range node.Hostinfo.RequestTags {
owners, err := expandOwnersFromTag(pol, tag)
if errors.Is(err, ErrInvalidTag) {
invalidTagMap[tag] = true

View File

@@ -16,10 +16,6 @@ import (
"tailscale.com/tailcfg"
)
var ipComparer = cmp.Comparer(func(x, y netip.Addr) bool {
return x.Compare(y) == 0
})
func Test(t *testing.T) {
check.TestingT(t)
}
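
The removed ipComparer (and its twin later in this file) is folded into a shared util.Comparers slice, but the comparer itself is the standard go-cmp recipe for types with unexported fields such as netip.Addr. In isolation, assuming github.com/google/go-cmp:

package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
)

// ipComparer teaches go-cmp how to compare netip.Addr values,
// which it cannot do by default because of unexported fields.
var ipComparer = cmp.Comparer(func(x, y netip.Addr) bool {
	return x.Compare(y) == 0
})

func main() {
	want := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
	got := []netip.Addr{netip.MustParseAddr("100.64.0.2")}

	// Variadic options mirror how util.Comparers... is passed.
	if diff := cmp.Diff(want, got, ipComparer); diff != "" {
		fmt.Printf("unexpected result (-want +got):\n%s", diff)
	}
}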
@@ -401,6 +397,7 @@ acls:
User: types.User{
Name: "testuser",
},
Hostinfo: &tailcfg.Hostinfo{},
},
})
@@ -951,7 +948,7 @@ func Test_listNodesInUser(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
got := filterNodesByUser(test.args.nodes, test.args.user)
if diff := cmp.Diff(test.want, got); diff != "" {
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" {
t.Errorf("listNodesInUser() = (-want +got):\n%s", diff)
}
})
@@ -1247,7 +1244,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"},
@@ -1258,7 +1255,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"},
@@ -1388,7 +1385,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"},
@@ -1426,7 +1423,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1437,7 +1434,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1447,13 +1444,15 @@ func Test_expandAlias(t *testing.T) {
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.3"),
},
User: types.User{Name: "marc"},
User: types.User{Name: "marc"},
Hostinfo: &tailcfg.Hostinfo{},
},
&types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
},
@@ -1503,7 +1502,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1514,7 +1513,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1524,7 +1523,8 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
user: "joe",
@@ -1533,6 +1533,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
},
@@ -1553,7 +1554,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1564,7 +1565,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1574,7 +1575,8 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
user: "joe",
@@ -1583,6 +1585,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
},
@@ -1598,7 +1601,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"},
@@ -1610,12 +1613,14 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
},
User: types.User{Name: "joe"},
ForcedTags: []string{"tag:accountant-webserver"},
Hostinfo: &tailcfg.Hostinfo{},
},
&types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
user: "joe",
@@ -1624,6 +1629,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
},
@@ -1639,7 +1645,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "hr-web1",
RequestTags: []string{"tag:hr-webserver"},
@@ -1650,7 +1656,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "hr-web2",
RequestTags: []string{"tag:hr-webserver"},
@@ -1660,7 +1666,8 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
user: "joe",
@@ -1671,7 +1678,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "hr-web1",
RequestTags: []string{"tag:hr-webserver"},
@@ -1682,7 +1689,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"),
},
User: types.User{Name: "joe"},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
OS: "centos",
Hostname: "hr-web2",
RequestTags: []string{"tag:hr-webserver"},
@@ -1692,7 +1699,8 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"),
},
User: types.User{Name: "joe"},
User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
},
},
},
@@ -1704,7 +1712,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
test.args.nodes,
test.args.user,
)
if diff := cmp.Diff(test.want, got, ipComparer); diff != "" {
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" {
t.Errorf("excludeCorrectlyTaggedNodes() (-want +got):\n%s", diff)
}
})
@@ -1893,6 +1901,81 @@ func TestReduceFilterRules(t *testing.T) {
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: ACLPolicy{
Groups: Groups{
"group:admins": {"user1"},
},
ACLs: []ACL{
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"group:admins:*"},
},
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"10.33.0.0/16:*"},
},
},
},
node: &types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.1"),
netip.MustParseAddr("fd7a:115c:a1e0::1"),
},
User: types.User{Name: "user1"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.2"),
netip.MustParseAddr("fd7a:115c:a1e0::2"),
},
User: types.User{Name: "user1"},
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
},
},
},
}
for _, tt := range tests {
@@ -1935,7 +2018,7 @@ func Test_getTags(t *testing.T) {
User: types.User{
Name: "joe",
},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:valid"},
},
},
@@ -1955,7 +2038,7 @@ func Test_getTags(t *testing.T) {
User: types.User{
Name: "joe",
},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:valid", "tag:invalid"},
},
},
@@ -1975,7 +2058,7 @@ func Test_getTags(t *testing.T) {
User: types.User{
Name: "joe",
},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{
"tag:invalid",
"tag:valid",
@@ -1999,7 +2082,7 @@ func Test_getTags(t *testing.T) {
User: types.User{
Name: "joe",
},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:invalid", "very-invalid"},
},
},
@@ -2015,7 +2098,7 @@ func Test_getTags(t *testing.T) {
User: types.User{
Name: "joe",
},
HostInfo: types.HostInfo{
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:invalid", "very-invalid"},
},
},
@@ -2056,10 +2139,6 @@ func Test_getTags(t *testing.T) {
}
func Test_getFilteredByACLPeers(t *testing.T) {
ipComparer := cmp.Comparer(func(x, y netip.Addr) bool {
return x.Compare(y) == 0
})
type args struct {
nodes types.Nodes
rules []tailcfg.FilterRule
@@ -2723,7 +2802,7 @@ func Test_getFilteredByACLPeers(t *testing.T) {
tt.args.nodes,
tt.args.rules,
)
if diff := cmp.Diff(tt.want, got, ipComparer); diff != "" {
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff)
}
})
@@ -2986,9 +3065,6 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
node := &types.Node{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 0,
@@ -2996,7 +3072,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
pol := &ACLPolicy{
@@ -3041,9 +3117,6 @@ func TestInvalidTagValidUser(t *testing.T) {
node := &types.Node{
ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1,
@@ -3051,7 +3124,7 @@ func TestInvalidTagValidUser(t *testing.T) {
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
pol := &ACLPolicy{
@@ -3095,9 +3168,6 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
node := &types.Node{
ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1,
@@ -3105,7 +3175,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
pol := &ACLPolicy{
@@ -3159,9 +3229,6 @@ func TestValidTagInvalidUser(t *testing.T) {
node := &types.Node{
ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "webserver",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1,
@@ -3169,7 +3236,7 @@ func TestValidTagInvalidUser(t *testing.T) {
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
HostInfo: types.HostInfo(hostInfo),
Hostinfo: &hostInfo,
}
hostInfo2 := tailcfg.Hostinfo{
@@ -3179,9 +3246,6 @@ func TestValidTagInvalidUser(t *testing.T) {
nodes2 := &types.Node{
ID: 2,
MachineKey: "56789",
NodeKey: "bar2",
DiscoKey: "faab",
Hostname: "user",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")},
UserID: 1,
@@ -3189,7 +3253,7 @@ func TestValidTagInvalidUser(t *testing.T) {
Name: "user1",
},
RegisterMethod: util.RegisterMethodAuthKey,
HostInfo: types.HostInfo(hostInfo2),
Hostinfo: &hostInfo2,
}
pol := &ACLPolicy{

View File

@@ -8,8 +8,8 @@ import (
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
xslices "golang.org/x/exp/slices"
"tailscale.com/tailcfg"
)
@@ -26,35 +26,32 @@ type UpdateNode func()
func logPollFunc(
mapRequest tailcfg.MapRequest,
node *types.Node,
isNoise bool,
) (func(string), func(error, string)) {
return func(msg string) {
log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey).
Str("node_key", node.NodeKey.ShortString()).
Str("node", node.Hostname).
Msg(msg)
},
func(err error, msg string) {
log.Error().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey).
Str("node_key", node.NodeKey.ShortString()).
Str("node", node.Hostname).
Err(err).
Msg(msg)
}
}
// handlePoll is the common code for the legacy and Noise protocols to
// managed the poll loop.
// handlePoll ensures the node gets the appropriate updates from either
// polling or immediate responses.
//
//nolint:gocyclo
func (h *Headscale) handlePoll(
@@ -62,11 +59,10 @@ func (h *Headscale) handlePoll(
ctx context.Context,
node *types.Node,
mapRequest tailcfg.MapRequest,
isNoise bool,
) {
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
logInfo, logErr := logPollFunc(mapRequest, node)
// This is the mechanism where the node gives us inforamtion about its
// This is the mechanism where the node gives us information about its
// current configuration.
//
// If OmitPeers is true, Stream is false, and ReadOnly is false,
@@ -74,24 +70,118 @@ func (h *Headscale) handlePoll(
// breaking existing long-polling (Stream == true) connections.
// In this case, the server can omit the entire response; the client
// only checks the HTTP response status code.
// TODO(kradalby): remove ReadOnly when we only support capVer 68+
if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly {
log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey).
Str("node_key", node.NodeKey.ShortString()).
Str("node", node.Hostname).
Strs("endpoints", node.Endpoints).
Msg("Received endpoint update")
Int("cap_ver", int(mapRequest.Version)).
Msg("Received update")
now := time.Now().UTC()
node.LastSeen = &now
node.Hostname = mapRequest.Hostinfo.Hostname
node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
node.Endpoints = mapRequest.Endpoints
change := node.PeerChangeFromMapRequest(mapRequest)
online := h.nodeNotifier.IsConnected(node.MachineKey)
change.Online = &online
node.ApplyPeerChange(&change)
hostInfoChange := node.Hostinfo.Equal(mapRequest.Hostinfo)
logTracePeerChange(node.Hostname, hostInfoChange, &change)
// Check if the Hostinfo of the node has changed.
// Note that Hostinfo.Equal reports true when nothing has
// changed, hence the negated check below.
// If it has changed, check if there has been a change to
// the routable IPs of the host and update them in
// the database. Then send a Changed update
// (containing the whole node object) to peers to inform about
// the route change.
// If the hostinfo has changed, but not the routes, just update
// hostinfo and let the function continue.
if !hostInfoChange {
oldRoutes := node.Hostinfo.RoutableIPs
newRoutes := mapRequest.Hostinfo.RoutableIPs
oldServicesCount := len(node.Hostinfo.Services)
newServicesCount := len(mapRequest.Hostinfo.Services)
node.Hostinfo = mapRequest.Hostinfo
sendUpdate := false
// Route changes come as part of Hostinfo, which means that
// when an update comes, the Node Route logic needs to run.
// This requires a "change" update rather than a "patch",
// which is more costly.
if !xslices.Equal(oldRoutes, newRoutes) {
var err error
sendUpdate, err = h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
http.Error(writer, "", http.StatusInternalServerError)
return
}
if h.ACLPolicy != nil {
// update routes with peer information
err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node)
if err != nil {
logErr(err, "Error running auto approved routes")
}
}
}
// Services is mostly useful for discovery and not critical,
// except for peerapi, which is how nodes talk to each other.
// If peerapi was not part of the initial mapresponse, we
// need to make sure it's sent out later as it is needed for
// Taildrop.
// TODO(kradalby): Length comparison is a bit naive, replace.
if oldServicesCount != newServicesCount {
sendUpdate = true
}
if sendUpdate {
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
// Send an update to all peers to propagate the new routes
// available.
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from handlePoll -> update -> new hostinfo",
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(
stateUpdate,
node.MachineKey.String())
}
// Send an update to the node itself to ensure it
// has an updated packetfilter allowing the new route
// if it is defined in the ACL.
selfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if selfUpdate.Valid() {
h.nodeNotifier.NotifyByMachineKey(
selfUpdate,
node.MachineKey)
}
return
}
}
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
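
The route-change detection above reduces to an element-wise comparison of the advertised prefixes before and after the Hostinfo update; only when they differ does the costlier save-and-notify path run. A minimal sketch (the diff uses golang.org/x/exp/slices as xslices; the stdlib slices package behaves the same):

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	oldRoutes := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/24"),
	}
	newRoutes := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/24"),
		netip.MustParsePrefix("10.1.0.0/24"),
	}

	// netip.Prefix is comparable, so Equal does element-wise ==
	// (order-sensitive, matching the behavior in the diff).
	if !slices.Equal(oldRoutes, newRoutes) {
		fmt.Println("routes changed: save to DB and notify peers")
	} else {
		fmt.Println("routes unchanged: a cheap patch update suffices")
	}
}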
@@ -100,20 +190,15 @@ func (h *Headscale) handlePoll(
return
}
err := h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
http.Error(writer, "", http.StatusInternalServerError)
return
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{&change},
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(
stateUpdate,
node.MachineKey.String())
}
h.nodeNotifier.NotifyWithIgnore(
types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
},
node.MachineKey)
writer.WriteHeader(http.StatusOK)
if f, ok := writer.(http.Flusher); ok {
@@ -121,7 +206,7 @@ func (h *Headscale) handlePoll(
}
return
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
// ReadOnly is whether the client just wants to fetch the
// MapResponse, without updating their Endpoints. The
// Endpoints field will be ignored and LastSeen will not be
@@ -130,7 +215,7 @@ func (h *Headscale) handlePoll(
// The intended use is for clients to discover the DERP map at
// start-up before their first real endpoint update.
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
h.handleLiteRequest(writer, node, mapRequest, isNoise)
h.handleLiteRequest(writer, node, mapRequest)
return
} else if mapRequest.OmitPeers && mapRequest.Stream {
@@ -139,12 +224,39 @@ func (h *Headscale) handlePoll(
return
}
now := time.Now().UTC()
node.LastSeen = &now
node.Hostname = mapRequest.Hostinfo.Hostname
node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
node.Endpoints = mapRequest.Endpoints
change := node.PeerChangeFromMapRequest(mapRequest)
// A stream is being set up, the node is Online
online := true
change.Online = &online
node.ApplyPeerChange(&change)
// Only save HostInfo if changed, update routes if changed
// TODO(kradalby): Remove when capver is over 68
if !node.Hostinfo.Equal(mapRequest.Hostinfo) {
oldRoutes := node.Hostinfo.RoutableIPs
newRoutes := mapRequest.Hostinfo.RoutableIPs
node.Hostinfo = mapRequest.Hostinfo
if !xslices.Equal(oldRoutes, newRoutes) {
_, err := h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
http.Error(writer, "", http.StatusInternalServerError)
return
}
}
}
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
// When a node connects to control, list the peers it has at
// that given point, further updates are kept in memory in
@@ -158,11 +270,14 @@ func (h *Headscale) handlePoll(
return
}
for _, peer := range peers {
online := h.nodeNotifier.IsConnected(peer.MachineKey)
peer.IsOnline = &online
}
mapp := mapper.NewMapper(
node,
peers,
h.privateKey2019,
isNoise,
h.DERPMap,
h.cfg.BaseDomain,
h.cfg.DNSConfig,
@@ -170,11 +285,6 @@ func (h *Headscale) handlePoll(
h.cfg.RandomizeClientPort,
)
err = h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
}
// update ACLRules with peer information (to update server tags if necessary)
if h.ACLPolicy != nil {
// update routes with peer information
@@ -184,14 +294,6 @@ func (h *Headscale) handlePoll(
}
}
// TODO(kradalby): Save specific stuff, not whole object.
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
logInfo("Sending initial map")
mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
@@ -216,18 +318,26 @@ func (h *Headscale) handlePoll(
return
}
h.nodeNotifier.NotifyWithIgnore(
types.StateUpdate{
Type: types.StatePeerChanged,
Changed: types.Nodes{node},
},
node.MachineKey)
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from handlePoll -> new node added",
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(
stateUpdate,
node.MachineKey.String())
}
// Set up the client stream
h.pollNetMapStreamWG.Add(1)
defer h.pollNetMapStreamWG.Done()
updateChan := make(chan types.StateUpdate)
// Use a buffered channel in case a node is not fully ready
// to receive a message to make sure we don't block the entire
// notifier.
// 12 is arbitrarily chosen.
updateChan := make(chan types.StateUpdate, 12)
defer closeChanWithLog(updateChan, node.Hostname, "updateChan")
// Register the node's update channel
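
The buffer matters because the notifier sends to every registered channel while holding its read lock, so a receiver that has not yet reached its select loop would otherwise stall all other notifications. A toy demonstration of the headroom a buffered channel provides:

package main

import "fmt"

func main() {
	// Buffered: the sender can deposit a few updates even if the
	// receiver has not reached its select loop yet.
	updateChan := make(chan string, 12) // 12 is arbitrary, as in the diff

	for i := 0; i < 3; i++ {
		updateChan <- fmt.Sprintf("update %d", i) // does not block
	}

	// An unbuffered channel would deadlock here without a
	// concurrently running receiver.
	for i := 0; i < 3; i++ {
		fmt.Println(<-updateChan)
	}
}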
@@ -241,6 +351,10 @@ func (h *Headscale) handlePoll(
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if len(node.Routes) > 0 {
go h.db.EnsureFailoverRouteIsAvailable(node)
}
for {
logInfo("Waiting for update on stream channel")
select {
@@ -270,14 +384,7 @@ func (h *Headscale) handlePoll(
// One alternative is to split these different channels into
// goroutines, but then you might have a problem without a lock
// if a keepalive is written at the same time as an update.
go func() {
err = h.db.UpdateLastSeen(node)
if err != nil {
logErr(err, "Cannot update node LastSeen")
return
}
}()
go h.updateNodeOnlineStatus(true, node)
case update := <-updateChan:
logInfo("Received update")
@@ -286,19 +393,54 @@ func (h *Headscale) handlePoll(
var data []byte
var err error
// Ensure the node object is updated, for example, there
// might have been a hostinfo update in a sidechannel
// which contains data needed to generate a map response.
node, err = h.db.GetNodeByMachineKey(node.MachineKey)
if err != nil {
logErr(err, "Could not get machine from db")
return
}
switch update.Type {
case types.StateFullUpdate:
logInfo("Sending Full MapResponse")
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
case types.StatePeerChanged:
logInfo("Sending PeerChanged MapResponse")
data, err = mapp.PeerChangedResponse(mapRequest, node, update.Changed, h.ACLPolicy)
logInfo(fmt.Sprintf("Sending Changed MapResponse: %s", update.Message))
for _, node := range update.ChangeNodes {
// If a node is not reported to be online, it might be
// because the value is outdated; check with the notifier.
// However, if it is set to Online but not in the notifier,
// this might be because it has announced itself, but not
// yet reached the stage of creating the notifier channel.
if node.IsOnline != nil && !*node.IsOnline {
isOnline := h.nodeNotifier.IsConnected(node.MachineKey)
node.IsOnline = &isOnline
}
}
data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message)
case types.StatePeerChangedPatch:
logInfo("Sending PeerChangedPatch MapResponse")
data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy)
case types.StatePeerRemoved:
logInfo("Sending PeerRemoved MapResponse")
data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed)
case types.StateSelfUpdate:
if len(update.ChangeNodes) == 1 {
logInfo("Sending SelfUpdate MapResponse")
node = update.ChangeNodes[0]
data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy)
} else {
logInfo("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.")
}
case types.StateDERPUpdated:
logInfo("Sending DERPUpdate MapResponse")
data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap)
case types.StateFullUpdate:
logInfo("Sending Full MapResponse")
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
}
if err != nil {
@@ -307,55 +449,45 @@ func (h *Headscale) handlePoll(
return
}
_, err = writer.Write(data)
if err != nil {
logErr(err, "Could not write the map response")
updateRequestsSentToNode.WithLabelValues(node.User.Name, node.Hostname, "failed").
Inc()
return
}
if flusher, ok := writer.(http.Flusher); ok {
flusher.Flush()
} else {
log.Error().Msg("Failed to create http flusher")
return
}
// See comment in keepAliveTicker
go func() {
err = h.db.UpdateLastSeen(node)
// Only send an update if there is a change
if data != nil {
_, err = writer.Write(data)
if err != nil {
logErr(err, "Cannot update node LastSeen")
logErr(err, "Could not write the map response")
updateRequestsSentToNode.WithLabelValues(node.User.Name, node.Hostname, "failed").
Inc()
return
}
}()
log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey).
Str("node", node.Hostname).
TimeDiff("timeSpent", time.Now(), now).
Msg("update sent")
if flusher, ok := writer.(http.Flusher); ok {
flusher.Flush()
} else {
log.Error().Msg("Failed to create http flusher")
return
}
log.Info().
Caller().
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()).
Str("machine_key", node.MachineKey.ShortString()).
Str("node", node.Hostname).
TimeDiff("timeSpent", time.Now(), now).
Msg("update sent")
}
case <-ctx.Done():
logInfo("The client has closed the connection")
go func() {
err = h.db.UpdateLastSeen(node)
if err != nil {
logErr(err, "Cannot update node LastSeen")
go h.updateNodeOnlineStatus(false, node)
return
}
}()
// Failover the node's routes if any.
go h.db.FailoverNodeRoutesWithNotify(node)
// The connection has been closed, so we can stop polling.
return
@@ -368,6 +500,36 @@ func (h *Headscale) handlePoll(
}
}
// updateNodeOnlineStatus records the last seen status of a node and notifies peers
// about the change in its online/offline status.
// It takes a bool reporting whether the node is now online.
func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) {
now := time.Now()
node.LastSeen = &now
statusUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
Online: &online,
LastSeen: &now,
},
},
}
if statusUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(statusUpdate, node.MachineKey.String())
}
err := h.db.UpdateLastSeen(node)
if err != nil {
log.Error().Err(err).Msg("Cannot update node LastSeen")
return
}
}
func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) {
log.Trace().
Str("handler", "PollNetMap").
@@ -382,17 +544,12 @@ func (h *Headscale) handleLiteRequest(
writer http.ResponseWriter,
node *types.Node,
mapRequest tailcfg.MapRequest,
isNoise bool,
) {
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
logInfo, logErr := logPollFunc(mapRequest, node)
mapp := mapper.NewMapper(
node,
// TODO(kradalby): It might not be acceptable to send
// an empty peer list here.
types.Nodes{},
h.privateKey2019,
isNoise,
h.DERPMap,
h.cfg.BaseDomain,
h.cfg.DNSConfig,
@@ -417,3 +574,38 @@ func (h *Headscale) handleLiteRequest(
logErr(err, "Failed to write response")
}
}
func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) {
trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname)
if change.Key != nil {
trace = trace.Str("node_key", change.Key.ShortString())
}
if change.DiscoKey != nil {
trace = trace.Str("disco_key", change.DiscoKey.ShortString())
}
if change.Online != nil {
trace = trace.Bool("online", *change.Online)
}
if change.Endpoints != nil {
eps := make([]string, len(change.Endpoints))
for idx, ep := range change.Endpoints {
eps[idx] = ep.String()
}
trace = trace.Strs("endpoints", eps)
}
if hostinfoChange {
trace = trace.Bool("hostinfo_changed", hostinfoChange)
}
if change.DERPRegion != 0 {
trace = trace.Int("derp_region", change.DERPRegion)
}
trace.Time("last_seen", *change.LastSeen).Msg("PeerChange received")
}

Some files were not shown because too many files have changed in this diff.