Compare commits

...

43 Commits

Author SHA1 Message Date
Motiejus Jakštys
bafb6791d3 oidc: allow reading the client secret from a file
Currently the most "secret" way to specify the OIDC client secret is via
the environment variable `OIDC_CLIENT_SECRET`, which is problematic[1].
Let's allow reading the OIDC client secret from a file. For extra
convenience, environment variables in the path to the secret are resolved.

[1]: https://systemd.io/CREDENTIALS/
2023-01-14 17:03:57 +01:00
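A minimal sketch of the mechanism this commit describes, assuming the "resolve the environment variables" step is a plain `os.ExpandEnv` before the read; `readOIDCClientSecretFromFile` is a hypothetical name, and the actual change appears inline in the config.go hunk further down:

```go
package config

import "os"

// readOIDCClientSecretFromFile expands environment variables in the
// configured path (e.g. ${CREDENTIALS_DIRECTORY}/oidc_client_secret, as
// populated by systemd's LoadCredential) before reading the secret file.
func readOIDCClientSecretFromFile(path string) (string, error) {
	secretBytes, err := os.ReadFile(os.ExpandEnv(path))
	if err != nil {
		return "", err
	}

	return string(secretBytes), nil
}
```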
Motiejus Jakštys
6edac4863a Makefile: remove a missing target
test_integration_oidc was removed in 0525bea593
2023-01-14 13:42:48 +01:00
Even Holthe
e27e01c09f nodes list: expose expiration time 2023-01-12 13:43:21 +01:00
Even Holthe
dd173ecc1f Refresh machines with correct new expiry 2023-01-12 13:43:21 +01:00
Kristoffer Dalby
8ca0fb7ed0 update ip_prefixes docs
We can't actually have arbitrary IP ranges; add a note about that.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-12 11:39:39 +01:00
Juan Font
6c714e88ee Added entry for performance improvements in ACLs 2023-01-11 08:58:03 +01:00
Allen
a6c8718a97 ToStringSlice leads to high CPU usage; converting early reduces CPU usage 2023-01-11 08:45:54 +01:00
Even Holthe
26282b7a54 Fix SIGSEGV crash related to map of state changes
See https://github.com/juanfont/headscale/issues/1114#issuecomment-1373698441
2023-01-10 22:26:21 +01:00
Kristoffer Dalby
93aca81c1c Read integration test config from Env
This commit sets the Headscale config from env instead of from file for
integration tests. The main point is to make sure that when we add
per-test config, it properly replaces the config key rather than
appending to it or something similar.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 23:06:43 +01:00
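A sketch of the replace-not-append property this commit is testing for; `mergeConfigEnv` is a hypothetical helper, not the actual hsic API:

```go
package hsic

// mergeConfigEnv overlays per-test settings onto a base environment.
// A later value for the same key replaces the earlier one rather than
// appending to it, which is the behavior the integration tests verify.
func mergeConfigEnv(base, overrides map[string]string) map[string]string {
	merged := make(map[string]string, len(base)+len(overrides))
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range overrides {
		merged[k] = v // replace, don't append
	}

	return merged
}
```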
Kristoffer Dalby
81254cdf7a Limit run regex for generated workflows
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 18:36:31 +01:00
Kristoffer Dalby
b3a0c4a63b Add integration readme
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 12:32:24 +01:00
Kristoffer Dalby
376235c9de make prettier ignore generated test flows
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 12:32:24 +01:00
Kristoffer Dalby
7274fdacc6 Generate github action jobs for integration tests
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 12:32:24 +01:00
Kristoffer Dalby
91c1f54b49 Remove "run all v2 job"
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 12:32:24 +01:00
Kristoffer Dalby
efd0f79fbc Add script to generate integration test GitHub jobs
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2023-01-06 12:32:24 +01:00
Juan Font
2084464225 Update CHANGELOG.md
Co-authored-by: Kristoffer Dalby <kristoffer@dalby.cc>
2023-01-05 14:59:02 +01:00
Juan Font
66ebbf3ecb Preload AuthKey in machine getters 2023-01-05 14:59:02 +01:00
Juan Font
55a3885614 Added integration tests for ephemeral nodes
Fetch the machines from headscale
2023-01-05 14:59:02 +01:00
Juan Font
afae1ff7b6 Delete ephemeral machines on logout
Update changelog

Use dedicated method to delete
2023-01-05 14:59:02 +01:00
Juan Font
4de49f5f49 Add isEphemeral() method to Machine 2023-01-05 14:59:02 +01:00
Even Holthe
6db9656008 oidc: update changelog 2023-01-04 09:23:52 +01:00
Even Holthe
fecb13b24b oidc: add basic docs 2023-01-04 09:23:52 +01:00
Even Holthe
23a595c26f oidc: add test for expiring nodes after token expiration 2023-01-04 09:23:52 +01:00
Even Holthe
085912cfb4 expire machines after db expiry 2023-01-04 09:23:52 +01:00
Even Holthe
7157e14aff add expiration from OIDC token to machine 2023-01-04 09:23:52 +01:00
Allen
4e2c4f92d3 reflect.DeepEqual copies its arguments by value, causing Go to continuously allocate memory 2023-01-03 18:09:18 +01:00
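The optimization pattern implied by this commit, as a sketch rather than the actual patch: compare slices with a hand-written loop, since `reflect.DeepEqual` boxes both arguments into `interface{}` values and allocates on every call.

```go
package acl

// stringSlicesEqual compares two slices without reflect.DeepEqual,
// avoiding the interface{} boxing and per-call allocations that
// DeepEqual incurs.
func stringSlicesEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}

	return true
}
```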
Juan Font
893b0de8fa Added tests on allowedip field for routing 2023-01-03 13:34:55 +01:00
Juan Font
9b98c3b79f Send in AllowedIPs both primary routes AND enabled exit routes 2023-01-03 13:34:55 +01:00
Even Holthe
6de26b1d7c Remove Tailscale v1.18.2 from test matrix 2023-01-02 16:06:12 +01:00
Christian Heusel
1f1931fb00 fix spelling mistakes 2023-01-01 22:45:16 +01:00
Christian Heusel
1f4efbcd3b add changelog entry 2023-01-01 22:45:16 +01:00
Christian Heusel
711fe1d806 enumerate the config 2023-01-01 22:45:16 +01:00
Christian Heusel
e2c62a7b0c document how to add new DNS records via extra_records 2023-01-01 22:45:16 +01:00
Christian Heusel
ab6565723e add the possibility for custom DNS records
related to https://github.com/juanfont/headscale/issues/762

Co-Authored-By: Jamie Greeff <jamie@greeff.me>
Signed-off-by: Christian Heusel <christian@heusel.eu>
2023-01-01 22:45:16 +01:00
John Axel Eriksson
7bb6f1a7eb domains/restricted_nameservers: check dnsConfig.Resolvers instead of dnsConfig.Nameservers 2022-12-31 19:06:32 +01:00
Avirut Mehta
549b82df11 Add Caddy instructions to reverse_proxy.md 2022-12-27 23:08:34 +01:00
Marc
036cdf922f templates: fix typo "custm" -> "custom" 2022-12-27 12:02:33 +01:00
jimyag
b4ff22935c update macos check 2022-12-25 15:45:45 +01:00
ma6174
5feadbf3fc fix goroutine leak 2022-12-25 14:11:16 +01:00
Juan Font
3e9ee816f9 Add integration tests for logout with authkey 2022-12-22 20:02:18 +01:00
Juan Font
2494e27a73 Make WaitForTailscaleLogout a Scenario method 2022-12-22 20:02:18 +01:00
Juan Font
8e8b65bb84 Add ko-fi sponsor button 2022-12-22 17:25:49 +01:00
Juan Font
b7d7fc57c4 Add logout method to tsic 2022-12-22 00:09:21 +01:00
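The new tsic method presumably wraps the CLI call the tests previously issued inline (see the auth_web_flow diff below, where `client.Execute([]string{"tailscale", "logout"})` becomes `client.Logout()`); a sketch against a stand-in interface:

```go
package tsic

// executor stands in for tsic's Tailscale-in-container type; Execute
// runs a command inside the container and returns stdout, stderr, error.
type executor interface {
	Execute(command []string) (string, string, error)
}

// logout issues "tailscale logout" via the container's Execute helper,
// which is what the new Logout method presumably does.
func logout(e executor) error {
	_, _, err := e.Execute([]string{"tailscale", "logout"})

	return err
}
```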
58 changed files with 2228 additions and 137 deletions

.github/FUNDING.yml (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
# These are supported funding model platforms
ko_fi: headscale

.github/workflows/test-integration-v2-TestAuthKeyLogoutAndRelogin.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthKeyLogoutAndRelogin
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthKeyLogoutAndRelogin$"

.github/workflows/test-integration-v2-TestAuthWebFlowAuthenticationPingAll.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowAuthenticationPingAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowAuthenticationPingAll$"

.github/workflows/test-integration-v2-TestAuthWebFlowLogoutAndRelogin.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowLogoutAndRelogin
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowLogoutAndRelogin$"

.github/workflows/test-integration-v2-TestCreateTailscale.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestCreateTailscale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestCreateTailscale$"

.github/workflows/test-integration-v2-TestEnablingRoutes.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnablingRoutes
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnablingRoutes$"

.github/workflows/test-integration-v2-TestHeadscale.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHeadscale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHeadscale$"

.github/workflows/test-integration-v2-TestNamespaceCommand.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNamespaceCommand
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNamespaceCommand$"

.github/workflows/test-integration-v2-TestOIDCAuthenticationPingAll.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCAuthenticationPingAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCAuthenticationPingAll$"

.github/workflows/test-integration-v2-TestOIDCExpireNodes.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCExpireNodes
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCExpireNodes$"

.github/workflows/test-integration-v2-TestPingAllByHostname.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByHostname
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByHostname$"

.github/workflows/test-integration-v2-TestPingAllByIP.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByIP
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByIP$"

.github/workflows/test-integration-v2-TestPreAuthKeyCommand.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommand
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommand$"

.github/workflows/test-integration-v2-TestPreAuthKeyCommandReusableEphemeral.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandReusableEphemeral
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandReusableEphemeral$"

.github/workflows/test-integration-v2-TestPreAuthKeyCommandWithoutExpiry.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandWithoutExpiry
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandWithoutExpiry$"

.github/workflows/test-integration-v2-TestResolveMagicDNS.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestResolveMagicDNS
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestResolveMagicDNS$"

.github/workflows/test-integration-v2-TestSSHIsBlockedInACL.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHIsBlockedInACL
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHIsBlockedInACL$"

.github/workflows/test-integration-v2-TestSSHMultipleNamespacesAllToAll.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHMultipleNamespacesAllToAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHMultipleNamespacesAllToAll$"

.github/workflows/test-integration-v2-TestSSHNoSSHConfigured.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHNoSSHConfigured
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHNoSSHConfigured$"

.github/workflows/test-integration-v2-TestSSHOneNamespaceAllToAll.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHOneNamespaceAllToAll
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHOneNamespaceAllToAll$"

.github/workflows/test-integration-v2-TestSSNamespaceOnlyIsolation.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSNamespaceOnlyIsolation
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSNamespaceOnlyIsolation$"

.github/workflows/test-integration-v2-TestTaildrop.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTaildrop
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTaildrop$"

.github/workflows/test-integration-v2-TestTailscaleNodesJoiningHeadcale.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTailscaleNodesJoiningHeadcale
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTailscaleNodesJoiningHeadcale$"


@@ -1,35 +0,0 @@
name: Integration Test v2 - kradalby
on: [pull_request]
jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
# We currently only run one job at the time, so make sure
# we just wipe all containers and all networks.
- name: Clean up Docker
if: steps.changed-files.outputs.any_changed == 'true'
run: |
docker kill $(docker ps -q) || true
docker network prune -f || true
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general

.prettierignore (new file, 1 line)

@@ -0,0 +1 @@
.github/workflows/test-integration-v2*

CHANGELOG.md

@@ -9,6 +9,11 @@
- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)
- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)
- Report if a machine is online in CLI more accurately [#1062](https://github.com/juanfont/headscale/pull/1062)
- Added config option for custom DNS records [#1035](https://github.com/juanfont/headscale/pull/1035)
- Expire nodes based on OIDC token expiry [#1067](https://github.com/juanfont/headscale/pull/1067)
- Remove ephemeral nodes on logout [#1098](https://github.com/juanfont/headscale/pull/1098)
- Performance improvements in ACLs [#1129](https://github.com/juanfont/headscale/pull/1129)
- OIDC client secret can be passed via a file [#1127](https://github.com/juanfont/headscale/pull/1127)
## 0.17.1 (2022-12-05)

Makefile

@@ -26,7 +26,7 @@ dev: lint test build
test:
@go test $(TAGS) -short -coverprofile=coverage.out ./...
test_integration: test_integration_cli test_integration_derp test_integration_oidc test_integration_v2_general
test_integration: test_integration_cli test_integration_derp test_integration_v2_general
test_integration_cli:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true

app.go (61 changed lines)

@@ -162,6 +162,7 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
aclRules: tailcfg.FilterAllowAll, // default allowall
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
lastStateChange: xsync.NewMapOf[time.Time](),
}
err = app.initDB()
@@ -217,6 +218,15 @@ func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
}
}
// expireExpiredMachines expires machines that have an explicit expiry set
// after that expiry time has passed.
func (h *Headscale) expireExpiredMachines(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
h.expireExpiredMachinesWorker()
}
}
func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
@@ -248,8 +258,7 @@ func (h *Headscale) expireEphemeralNodesWorker() {
expiredFound := false
for _, machine := range machines {
if machine.AuthKey != nil && machine.LastSeen != nil &&
machine.AuthKey.Ephemeral &&
if machine.isEphemeral() && machine.LastSeen != nil &&
time.Now().
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
expiredFound = true
@@ -273,6 +282,53 @@ func (h *Headscale) expireEphemeralNodesWorker() {
}
}
func (h *Headscale) expireExpiredMachinesWorker() {
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().Err(err).Msg("Error listing namespaces")
return
}
for _, namespace := range namespaces {
machines, err := h.ListMachinesInNamespace(namespace.Name)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespace.Name).
Msg("Error listing machines in namespace")
return
}
expiredFound := false
for index, machine := range machines {
if machine.isExpired() &&
machine.Expiry.After(h.getLastStateChange(namespace)) {
expiredFound = true
err := h.ExpireMachine(&machines[index])
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("🤮 Cannot expire machine")
} else {
log.Info().
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("Machine successfully expired")
}
}
}
if expiredFound {
h.setLastStateChangeToNow()
}
}
}
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
@@ -494,6 +550,7 @@ func (h *Headscale) Serve() error {
}
go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval)
go h.failoverSubnetRoutes(updateInterval)

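The ephemeral-node worker above now calls `machine.isEphemeral()` (added in 4de49f5f49), whose body is not part of this diff. Judging from the condition it replaces in `expireEphemeralNodesWorker`, it is presumably equivalent to:

```go
package headscale

// Stub types for illustration; the real Machine and PreAuthKey are
// defined elsewhere in this package.
type PreAuthKey struct{ Ephemeral bool }
type Machine struct{ AuthKey *PreAuthKey }

// isEphemeral reports whether the machine was registered with an
// ephemeral pre-auth key, inferred from the replaced check
// machine.AuthKey != nil && machine.AuthKey.Ephemeral.
func (machine *Machine) isEphemeral() bool {
	return machine.AuthKey != nil && machine.AuthKey.Ephemeral
}
```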
cmd/gh-action-integration-generator/main.go (new file, 114 lines)

@@ -0,0 +1,114 @@
package main
//go:generate go run ./main.go
import (
"bytes"
"fmt"
"log"
"os"
"text/template"
)
var (
jobFileNameTemplate = `test-integration-v2-%s.yaml`
jobTemplate = template.Must(template.New("jobTemplate").Parse(`
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - {{.Name}}
on: [pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^{{.Name}}$"
`))
)
const workflowFilePerm = 0o600
func main() {
type testConfig struct {
Name string
}
// TODO(kradalby): automatic fetch tests at runtime
tests := []string{
"TestAuthKeyLogoutAndRelogin",
"TestAuthWebFlowAuthenticationPingAll",
"TestAuthWebFlowLogoutAndRelogin",
"TestCreateTailscale",
"TestEnablingRoutes",
"TestHeadscale",
"TestNamespaceCommand",
"TestOIDCAuthenticationPingAll",
"TestOIDCExpireNodes",
"TestPingAllByHostname",
"TestPingAllByIP",
"TestPreAuthKeyCommand",
"TestPreAuthKeyCommandReusableEphemeral",
"TestPreAuthKeyCommandWithoutExpiry",
"TestResolveMagicDNS",
"TestSSHIsBlockedInACL",
"TestSSHMultipleNamespacesAllToAll",
"TestSSHNoSSHConfigured",
"TestSSHOneNamespaceAllToAll",
"TestSSNamespaceOnlyIsolation",
"TestTaildrop",
"TestTailscaleNodesJoiningHeadcale",
}
for _, test := range tests {
var content bytes.Buffer
if err := jobTemplate.Execute(&content, testConfig{
Name: test,
}); err != nil {
log.Fatalf("failed to render template: %s", err)
}
path := "../../.github/workflows/" + fmt.Sprintf(jobFileNameTemplate, test)
err := os.WriteFile(path, content.Bytes(), workflowFilePerm)
if err != nil {
log.Fatalf("failed to write github job: %s", err)
}
}
}

cmd/headscale/cli/mockoidc.go

@@ -16,10 +16,11 @@ const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)
var accessTTL = 2 * time.Minute
func init() {
rootCmd.AddCommand(mockOidcCmd)
}
@@ -54,6 +55,16 @@ func mockOIDC() error {
if portStr == "" {
return errMockOidcPortNotDefined
}
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
if accessTTLOverride != "" {
newTTL, err := time.ParseDuration(accessTTLOverride)
if err != nil {
return err
}
accessTTL = newTTL
}
log.Info().Msgf("Access token TTL: %s", accessTTL)
port, err := strconv.Atoi(portStr)
if err != nil {

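Since the override goes through `time.ParseDuration`, `MOCKOIDC_ACCESS_TTL` accepts Go duration syntax; a standalone illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Example values a MOCKOIDC_ACCESS_TTL override could carry; all
	// are valid inputs to time.ParseDuration.
	for _, s := range []string{"5m", "90s", "1h30m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println(s, "->", err)

			continue
		}
		fmt.Println(s, "->", d)
	}
}
```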
cmd/headscale/cli/nodes.go

@@ -475,6 +475,7 @@ func nodesToPtables(
"IP addresses",
"Ephemeral",
"Last seen",
"Expiration",
"Online",
"Expired",
}
@@ -501,8 +502,12 @@ func nodesToPtables(
}
var expiry time.Time
var expiryTime string
if machine.Expiry != nil {
expiry = machine.Expiry.AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05")
} else {
expiryTime = "N/A"
}
var machineKey key.MachinePublic
@@ -583,6 +588,7 @@ func nodesToPtables(
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
expiryTime,
online,
expired,
}

config-example.yaml

@@ -63,6 +63,11 @@ noise:
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# While this looks like it can take arbitrary values, it
# needs to be within IP ranges supported by the Tailscale
# client.
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
@@ -235,6 +240,17 @@ dns_config:
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
@@ -260,6 +276,11 @@ unix_socket_permission: "0770"
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from a file.
# # It resolves environment variables, making integration with systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# Customize the scopes used in the OIDC flow; they default to "openid", "profile" and "email".
# Custom query parameters can also be added to the Authorize Endpoint request.

config.go

@@ -6,6 +6,7 @@ import (
"io/fs"
"net/netip"
"net/url"
"os"
"strings"
"time"
@@ -26,6 +27,8 @@ const (
TextLogFormat = "text"
)
var errOidcMutuallyExclusive = errors.New("oidc_client_secret and oidc_client_secret_path are mutually exclusive")
// Config contains the initial Headscale configuration.
type Config struct {
ServerURL string
@@ -408,7 +411,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
if viper.IsSet("dns_config.restricted_nameservers") {
if len(dnsConfig.Nameservers) > 0 {
if len(dnsConfig.Resolvers) > 0 {
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",
@@ -440,7 +443,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
if viper.IsSet("dns_config.domains") {
domains := viper.GetStringSlice("dns_config.domains")
if len(dnsConfig.Nameservers) > 0 {
if len(dnsConfig.Resolvers) > 0 {
dnsConfig.Domains = domains
} else if domains != nil {
log.Warn().
@@ -448,6 +451,20 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
}
if viper.IsSet("dns_config.extra_records") {
var extraRecords []tailcfg.DNSRecord
err := viper.UnmarshalKey("dns_config.extra_records", &extraRecords)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse dns_config.extra_records")
}
dnsConfig.ExtraRecords = extraRecords
}
if viper.IsSet("dns_config.magic_dns") {
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
}
@@ -514,6 +531,19 @@ func GetHeadscaleConfig() (*Config, error) {
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
}
oidcClientSecret := viper.GetString("oidc.client_secret")
oidcClientSecretPath := viper.GetString("oidc.client_secret_path")
if oidcClientSecretPath != "" && oidcClientSecret != "" {
return nil, errOidcMutuallyExclusive
}
if oidcClientSecretPath != "" {
secretBytes, err := os.ReadFile(os.ExpandEnv(oidcClientSecretPath))
if err != nil {
return nil, err
}
oidcClientSecret = string(secretBytes)
}
return &Config{
ServerURL: viper.GetString("server_url"),
Addr: viper.GetString("listen_addr"),
@@ -566,7 +596,7 @@ func GetHeadscaleConfig() (*Config, error) {
),
Issuer: viper.GetString("oidc.issuer"),
ClientID: viper.GetString("oidc.client_id"),
ClientSecret: viper.GetString("oidc.client_secret"),
ClientSecret: oidcClientSecret,
Scope: viper.GetStringSlice("oidc.scope"),
ExtraParams: viper.GetStringMapString("oidc.extra_params"),
AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"),

docs/README.md

@@ -12,6 +12,7 @@ please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Iss
- [Running headscale on Linux](running-headscale-linux.md)
- [Control headscale remotely](remote-cli.md)
- [Using a Windows client with headscale](windows-client.md)
- [Configuring OIDC](oidc.md)
### References
@@ -29,6 +30,7 @@ written by community members. It is _not_ verified by `headscale` developers.
- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)
- [Set Custom DNS records](dns-records.md)
## Misc

docs/dns-records.md (new file, 83 lines)

@@ -0,0 +1,83 @@
# Setting custom DNS records
## Goal
This document shows how a user can set custom DNS records with `headscale`'s MagicDNS.
An example use case is to serve apps on the same host via a reverse proxy like NGINX, in this case a Prometheus monitoring stack. This allows accessing the service nicely at "http://grafana.myvpn.example.com" instead of the hostname-and-port combination "http://hostname-in-magic-dns.myvpn.example.com:3000".
## Setup
### 1. Change the configuration
1. Change the `config.yaml` to contain the desired records like so:
```yaml
dns_config:
...
extra_records:
- name: "prometheus.myvpn.example.com"
type: "A"
value: "100.64.0.3"
- name: "grafana.myvpn.example.com"
type: "A"
value: "100.64.0.3"
...
```
2. Restart your headscale instance.
Beware of the limitations listed later on!
### 2. Verify that the records are set
You can use a DNS querying tool of your choice on one of your hosts to verify that your newly set records are actually available in MagicDNS; here we use [`dig`](https://man.archlinux.org/man/dig.1.en):
```
$ dig grafana.myvpn.example.com
; <<>> DiG 9.18.10 <<>> grafana.myvpn.example.com
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 44054
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 65494
;; QUESTION SECTION:
;grafana.myvpn.example.com. IN A
;; ANSWER SECTION:
grafana.myvpn.example.com. 593 IN A 100.64.0.3
;; Query time: 0 msec
;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)
;; WHEN: Sat Dec 31 11:46:55 CET 2022
;; MSG SIZE rcvd: 66
```
### 3. Optional: Setup the reverse proxy
The motivating example here was to be able to access internal monitoring services on the same host without specifying a port:
```
server {
listen 80;
listen [::]:80;
server_name grafana.myvpn.example.com;
location / {
proxy_pass http://localhost:3000;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
```
## Limitations
[Not all types of records are supported](https://github.com/tailscale/tailscale/blob/main/ipn/ipnlocal/local.go#L2891-L2909); notably, CNAME records are not.

docs/oidc.md (new file, 141 lines)

@@ -0,0 +1,141 @@
# Configuring Headscale to use OIDC authentication
To authenticate users through a centralized identity solution, you must enable the OIDC integration.
Known limitations:
- No dynamic ACL support
- OIDC groups cannot be used in ACLs
## Basic configuration
In your `config.yaml`, customize this to your liking:
```yaml
oidc:
# Block further startup until the OIDC provider is healthy and available
only_start_if_oidc_is_available: true
# Specified by your OIDC provider
issuer: "https://your-oidc.issuer.com/path"
# Specified/generated by your OIDC provider
client_id: "your-oidc-client-id"
client_secret: "your-oidc-client-secret"
# Alternatively, set `client_secret_path` to read the secret from a file.
# It resolves environment variables, making integration with systemd's
# `LoadCredential` straightforward:
#client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# Customize the scopes used in the OIDC flow; they default to "openid", "profile" and "email".
# Custom query parameters can also be added to the Authorize Endpoint request.
scope: ["openid", "profile", "email", "custom"]
# Optional: Passed on to the browser login request used to tweak behaviour for the OIDC provider
extra_params:
domain_hint: example.com
# Optional: List allowed principal domains and/or users. If an authenticated user's domain is not in this list,
# the authentication request will be rejected.
allowed_domains:
- example.com
# Optional. Note that groups from Keycloak have a leading '/'.
allowed_groups:
- /headscale
# Optional.
allowed_users:
- alice@example.com
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# This will transform `first-name.last-name@example.com` into the namespace `first-name.last-name`.
# If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the following
# namespace: `first-name.last-name.example.com`
strip_email_domain: true
```
## Azure AD example
In order to integrate Headscale with Azure Active Directory, we'll need to provision an App Registration with the correct scopes and redirect URI. Here is an example with Terraform:
```hcl
resource "azuread_application" "headscale" {
display_name = "Headscale"
sign_in_audience = "AzureADMyOrg"
fallback_public_client_enabled = false
required_resource_access {
// Microsoft Graph
resource_app_id = "00000003-0000-0000-c000-000000000000"
resource_access {
// scope: profile
id = "14dad69e-099b-42c9-810b-d002981feec1"
type = "Scope"
}
resource_access {
// scope: openid
id = "37f7f235-527c-4136-accd-4a02d197296e"
type = "Scope"
}
resource_access {
// scope: email
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0"
type = "Scope"
}
}
web {
# Points at your running Headscale instance
redirect_uris = ["https://headscale.example.com/oidc/callback"]
implicit_grant {
access_token_issuance_enabled = false
id_token_issuance_enabled = true
}
}
group_membership_claims = ["SecurityGroup"]
optional_claims {
# Expose group memberships
id_token {
name = "groups"
}
}
}
resource "azuread_application_password" "headscale-application-secret" {
display_name = "Headscale Server"
application_object_id = azuread_application.headscale.object_id
}
resource "azuread_service_principal" "headscale" {
application_id = azuread_application.headscale.application_id
}
resource "azuread_service_principal_password" "headscale" {
service_principal_id = azuread_service_principal.headscale.id
end_date_relative = "44640h"
}
output "headscale_client_id" {
value = azuread_application.headscale.application_id
}
output "headscale_client_secret" {
value = azuread_application_password.headscale-application-secret.value
}
```
And in your Headscale `config.yaml`:
```yaml
oidc:
issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
client_id: "<client-id-from-terraform>"
client_secret: "<client-secret-from-terraform>"
# Optional: add "groups"
scope: ["openid", "profile", "email"]
extra_params:
# Use your own domain, associated with Azure AD
domain_hint: example.com
# Optional: Force the Azure AD account picker
prompt: select_account
```

docs/reverse-proxy.md

@@ -98,3 +98,17 @@ spec:
upgrade_configs:
- upgrade_type: tailscale-control-protocol
```
## Caddy
The following Caddyfile is all that is necessary to use Caddy as a reverse proxy for headscale, in combination with the `config.yaml` specifications above to disable headscale's built-in TLS. Replace values as necessary - `<YOUR_SERVER_NAME>` should be the FQDN at which headscale will be served, and `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `localhost:8080`.
```
<YOUR_SERVER_NAME> {
reverse_proxy <IP:PORT>
}
```
Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary.
For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference.

grpcv1.go

@@ -176,6 +176,7 @@ func (api headscaleV1APIServer) RegisterMachine(
machine, err := api.h.RegisterMachineFromAuthCallback(
request.GetKey(),
request.GetNamespace(),
nil,
RegisterMethodCLI,
)
if err != nil {

integration/README.md (new file, 16 lines)

@@ -0,0 +1,16 @@
# Integration testing
Headscale relies on integration testing to ensure we remain compatible with Tailscale.
This is typically performed by starting a Headscale server and running a test "scenario"
with an array of Tailscale clients and versions.
Headscale's test framework and the current set of scenarios are defined in this directory.
Tests are located in files ending with `_test.go`; the framework is located in the remaining files.
## Running integration tests on GitHub Actions
Each test currently runs as a separate workflow in GitHub Actions. To add a new test, add it
to the list in `../cmd/gh-action-integration-generator/main.go`, run `go generate` inside
`../cmd/gh-action-integration-generator/`, and commit the result.

integration/auth_oidc_test.go

@@ -9,8 +9,10 @@ import (
"log"
"net"
"net/http"
"net/netip"
"strconv"
"testing"
"time"
"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/dockertestutil"
@@ -22,7 +24,7 @@ import (
const (
dockerContextPath = "../."
hsicOIDCMockHashLength = 6
oidcServerPort = 10000
defaultAccessTTL = 10 * time.Minute
)
var errStatusCodeNotOK = errors.New("status code not OK")
@@ -50,11 +52,78 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
"namespace1": len(TailscaleVersions),
}
oidcConfig, err := scenario.runMockOIDC()
oidcConfig, err := scenario.runMockOIDC(defaultAccessTTL)
if err != nil {
t.Errorf("failed to run mock OIDC server: %s", err)
}
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
"CREDENTIALS_DIRECTORY_TEST": "/tmp",
"HEADSCALE_OIDC_CLIENT_SECRET_PATH": "${CREDENTIALS_DIRECTORY_TEST}/hs_client_oidc_secret",
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
}
err = scenario.CreateHeadscaleEnv(
spec,
hsic.WithTestName("oidcauthping"),
hsic.WithConfigEnv(oidcMap),
hsic.WithHostnameAsServerURL(),
hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(oidcConfig.ClientSecret)),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := pingAll(t, allClients, allIps)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestOIDCExpireNodes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
shortAccessTTL := 5 * time.Minute
baseScenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
scenario := AuthOIDCScenario{
Scenario: baseScenario,
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
}
oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
if err != nil {
t.Fatalf("failed to run mock OIDC server: %s", err)
}
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
@@ -64,7 +133,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
err = scenario.CreateHeadscaleEnv(
spec,
hsic.WithTestName("oidcauthping"),
hsic.WithTestName("oidcexpirenodes"),
hsic.WithConfigEnv(oidcMap),
hsic.WithHostnameAsServerURL(),
)
@@ -87,20 +156,11 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := 0
success := pingAll(t, allClients, allIps)
t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
// await all nodes being logged out after OIDC token expiry
scenario.WaitForTailscaleLogout()
err = scenario.Shutdown()
if err != nil {
@@ -143,7 +203,13 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv(
return nil
}
func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDCConfig, error) {
port, err := dockertestutil.RandomFreeHostPort()
if err != nil {
log.Fatalf("could not find an open port: %s", err)
}
portNotation := fmt.Sprintf("%d/tcp", port)
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)
hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
@@ -151,16 +217,17 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
mockOidcOptions := &dockertest.RunOptions{
Name: hostname,
Cmd: []string{"headscale", "mockoidc"},
ExposedPorts: []string{"10000/tcp"},
ExposedPorts: []string{portNotation},
PortBindings: map[docker.Port][]docker.PortBinding{
"10000/tcp": {{HostPort: "10000"}},
docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},
},
Networks: []*dockertest.Network{s.Scenario.network},
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
"MOCKOIDC_PORT=10000",
fmt.Sprintf("MOCKOIDC_PORT=%d", port),
"MOCKOIDC_CLIENT_ID=superclient",
"MOCKOIDC_CLIENT_SECRET=supersecret",
fmt.Sprintf("MOCKOIDC_ACCESS_TTL=%s", accessTTL.String()),
},
}
@@ -169,7 +236,7 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
ContextDir: dockerContextPath,
}
err := s.pool.RemoveContainerByName(hostname)
err = s.pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
@@ -184,11 +251,7 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
}
log.Println("Waiting for headscale mock oidc to be ready for tests")
hostEndpoint := fmt.Sprintf(
"%s:%s",
s.mockOIDC.GetIPInNetwork(s.network),
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
)
hostEndpoint := fmt.Sprintf("%s:%d", s.mockOIDC.GetIPInNetwork(s.network), port)
if err := s.pool.Retry(func() error {
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
@@ -215,11 +278,11 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)
return &headscale.OIDCConfig{
Issuer: fmt.Sprintf("http://%s/oidc",
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
Issuer: fmt.Sprintf("http://%s/oidc", net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(port))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
OnlyStartIfOIDCIsAvailable: true,
}, nil
}
@@ -292,6 +355,24 @@ func (s *AuthOIDCScenario) runTailscaleUp(
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}
func pingAll(t *testing.T, clients []TailscaleClient, ips []netip.Addr) int {
t.Helper()
success := 0
for _, client := range clients {
for _, ip := range ips {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
return success
}
func (s *AuthOIDCScenario) Shutdown() error {
err := s.pool.Purge(s.mockOIDC)
if err != nil {

integration/auth_web_flow_test.go

@@ -141,13 +141,13 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
}
for _, client := range allClients {
_, _, err = client.Execute([]string{"tailscale", "logout"})
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}
scenario.waitForTailscaleLogout()
scenario.WaitForTailscaleLogout()
t.Logf("all clients logged out")
@@ -259,22 +259,6 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
return nil
}
func (s *AuthWebFlowScenario) waitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}
func (s *AuthWebFlowScenario) runTailscaleUp(
namespaceStr, loginServer string,
) error {

View File

@@ -11,7 +11,7 @@ type ControlServer interface {
GetEndpoint() string
WaitForReady() error
CreateNamespace(namespace string) error
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
CreateAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
GetCert() []byte
GetHostname() string

View File

@@ -2,6 +2,7 @@ package dockertestutil
import (
"errors"
"net"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
@@ -60,3 +61,20 @@ func AddContainerToNetwork(
return nil
}
// RandomFreeHostPort asks the kernel for a free open port that is ready to use.
// (from https://github.com/phayes/freeport)
func RandomFreeHostPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer listener.Close()
//nolint:forcetypeassert
return listener.Addr().(*net.TCPAddr).Port, nil
}
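One caveat with this freeport pattern: the listener is closed before the port is returned, so the port is only probably free — another process could claim it in the window before the container binds it. A usage sketch, mirroring the runMockOIDC caller above:

port, err := dockertestutil.RandomFreeHostPort()
if err != nil {
	return nil, err
}
// the same port number is used both inside the container and on the host,
// so the in-network endpoint for the OIDC issuer URL is predictable
portNotation := fmt.Sprintf("%d/tcp", port)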

View File

@@ -2,6 +2,7 @@ package integration
import (
"fmt"
"net/netip"
"strings"
"testing"
"time"
@@ -66,6 +67,239 @@ func TestPingAllByIP(t *testing.T) {
}
}
func TestAuthKeyLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}
scenario.WaitForTailscaleLogout()
t.Logf("all clients logged out")
headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to get headscale server: %s", err)
}
for namespaceName := range spec {
key, err := scenario.CreatePreAuthKey(namespaceName, true, false)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}
err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
allClients, err = scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
// let's check that the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Errorf("IPs changed for client %s", client.Hostname())
}
for _, ip := range ips {
found := false
for _, oldIP := range clientIPs[client] {
if ip == oldIP {
found = true
break
}
}
if !found {
t.Errorf("IPs changed for client %s. Used to be %v now %v", client.Hostname(), clientIPs[client], ips)
}
}
}
t.Logf("all clients IPs are the same")
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestEphemeral(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
for namespaceName, clientCount := range spec {
err = scenario.CreateNamespace(namespaceName)
if err != nil {
t.Errorf("failed to create namespace %s: %s", namespaceName, err)
}
err = scenario.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, []tsic.Option{}...)
if err != nil {
t.Errorf("failed to create tailscale nodes in namespace %s: %s", namespaceName, err)
}
key, err := scenario.CreatePreAuthKey(namespaceName, true, true)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}
err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}
scenario.WaitForTailscaleLogout()
t.Logf("all clients logged out")
for namespaceName := range spec {
machines, err := headscale.ListMachinesInNamespace(namespaceName)
if err != nil {
t.Fatalf("failed to list machines in namespace %s: %s", namespaceName, err)
}
if len(machines) != 0 {
t.Errorf("expected no machines, got %d in namespace %s", len(machines), namespaceName)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestPingAllByHostname(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

View File

@@ -60,6 +60,8 @@ package hsic
// }
// TODO: Reuse the actual configuration object above.
// Deprecated: use env function instead as it is easier to
// override.
func DefaultConfigYAML() string {
yaml := `
log:
@@ -95,3 +97,35 @@ derp:
return yaml
}
func MinimumConfigYAML() string {
return `
private_key_path: /tmp/private.key
noise:
private_key_path: /tmp/noise_private.key
`
}
func DefaultConfigEnv() map[string]string {
return map[string]string{
"HEADSCALE_LOG_LEVEL": "trace",
"HEADSCALE_ACL_POLICY_PATH": "",
"HEADSCALE_DB_TYPE": "sqlite3",
"HEADSCALE_DB_PATH": "/tmp/integration_test_db.sqlite3",
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m",
"HEADSCALE_NODE_UPDATE_CHECK_INTERVAL": "10s",
"HEADSCALE_IP_PREFIXES": "fd7a:115c:a1e0::/48 100.64.0.0/10",
"HEADSCALE_DNS_CONFIG_BASE_DOMAIN": "headscale.net",
"HEADSCALE_DNS_CONFIG_MAGIC_DNS": "true",
"HEADSCALE_DNS_CONFIG_DOMAINS": "",
"HEADSCALE_DNS_CONFIG_NAMESERVERS": "127.0.0.11 1.1.1.1",
"HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key",
"HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key",
"HEADSCALE_LISTEN_ADDR": "0.0.0.0:8080",
"HEADSCALE_METRICS_LISTEN_ADDR": "127.0.0.1:9090",
"HEADSCALE_SERVER_URL": "http://headscale:8080",
"HEADSCALE_DERP_URLS": "https://controlplane.tailscale.com/derpmap/default",
"HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "false",
"HEADSCALE_DERP_UPDATE_FREQUENCY": "1m",
}
}
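Because hsic.env is now a map keyed by variable name (see the HeadscaleInContainer changes below), a per-test override replaces the default instead of appending a duplicate entry to an env slice. A hypothetical call site, assuming the WithConfigEnv option added in this changeset:

headscale, err := hsic.New(pool, network,
	hsic.WithTestName("example"),
	hsic.WithConfigEnv(map[string]string{
		// replaces the 30m default from DefaultConfigEnv rather than adding a second entry
		"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "1m",
	}),
)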

View File

@@ -17,6 +17,7 @@ import (
"net/http"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/dockertestutil"
@@ -35,6 +36,11 @@ const (
var errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
type fileInContainer struct {
path string
contents []byte
}
type HeadscaleInContainer struct {
hostname string
@@ -43,11 +49,12 @@ type HeadscaleInContainer struct {
network *dockertest.Network
// optional config
port int
aclPolicy *headscale.ACLPolicy
env []string
tlsCert []byte
tlsKey []byte
port int
aclPolicy *headscale.ACLPolicy
env map[string]string
tlsCert []byte
tlsKey []byte
filesInContainer []fileInContainer
}
type Option = func(c *HeadscaleInContainer)
@@ -55,7 +62,7 @@ type Option = func(c *HeadscaleInContainer)
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
return func(hsic *HeadscaleInContainer) {
// TODO(kradalby): Move somewhere appropriate
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
hsic.env["HEADSCALE_ACL_POLICY_PATH"] = aclPolicyPath
hsic.aclPolicy = acl
}
@@ -69,8 +76,8 @@ func WithTLS() Option {
}
// TODO(kradalby): Move somewhere appropriate
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_CERT_PATH=%s", tlsCertPath))
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_KEY_PATH=%s", tlsKeyPath))
hsic.env["HEADSCALE_TLS_CERT_PATH"] = tlsCertPath
hsic.env["HEADSCALE_TLS_KEY_PATH"] = tlsKeyPath
hsic.tlsCert = cert
hsic.tlsKey = key
@@ -80,7 +87,7 @@ func WithTLS() Option {
func WithConfigEnv(configEnv map[string]string) Option {
return func(hsic *HeadscaleInContainer) {
for key, value := range configEnv {
hsic.env = append(hsic.env, fmt.Sprintf("%s=%s", key, value))
hsic.env[key] = value
}
}
}
@@ -102,12 +109,20 @@ func WithTestName(testName string) Option {
func WithHostnameAsServerURL() Option {
return func(hsic *HeadscaleInContainer) {
hsic.env = append(
hsic.env,
fmt.Sprintf("HEADSCALE_SERVER_URL=http://%s:%d",
hsic.GetHostname(),
hsic.port,
))
hsic.env["HEADSCALE_SERVER_URL"] = fmt.Sprintf("http://%s",
net.JoinHostPort(hsic.GetHostname(),
fmt.Sprintf("%d", hsic.port)),
)
}
}
func WithFileInContainer(path string, contents []byte) Option {
return func(hsic *HeadscaleInContainer) {
hsic.filesInContainer = append(hsic.filesInContainer,
fileInContainer{
path: path,
contents: contents,
})
}
}
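WithFileInContainer lets a test stage arbitrary files before headscale starts; the files are written right after container creation (see the loop further down in New). A hypothetical use, with an illustrative path and contents:

hsic.WithFileInContainer("/var/lib/headscale/oidc_secret", []byte("supersecret"))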
@@ -129,6 +144,9 @@ func New(
pool: pool,
network: network,
env: DefaultConfigEnv(),
filesInContainer: []fileInContainer{},
}
for _, opt := range opts {
@@ -144,6 +162,13 @@ func New(
ContextDir: dockerContextPath,
}
env := []string{}
for key, value := range hsic.env {
env = append(env, fmt.Sprintf("%s=%s", key, value))
}
log.Printf("ENV: \n%s", spew.Sdump(hsic.env))
runOptions := &dockertest.RunOptions{
Name: hsic.hostname,
ExposedPorts: []string{portProto},
@@ -152,7 +177,7 @@ func New(
// TODO(kradalby): Get rid of this hack, we currently need to give ourselves some
// time to inject the headscale configuration further down.
Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve"},
Env: hsic.env,
Env: env,
}
// dockertest isn't very good at handling containers that have already
@@ -177,7 +202,7 @@ func New(
hsic.container = container
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(DefaultConfigYAML()))
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(MinimumConfigYAML()))
if err != nil {
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
}
@@ -206,6 +231,12 @@ func New(
}
}
for _, f := range hsic.filesInContainer {
if err := hsic.WriteFile(f.path, f.contents); err != nil {
return nil, fmt.Errorf("failed to write %q: %w", f.path, err)
}
}
return hsic, nil
}
@@ -316,6 +347,8 @@ func (t *HeadscaleInContainer) CreateNamespace(
func (t *HeadscaleInContainer) CreateAuthKey(
namespace string,
reusable bool,
ephemeral bool,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",
@@ -323,13 +356,20 @@ func (t *HeadscaleInContainer) CreateAuthKey(
namespace,
"preauthkeys",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
}
if reusable {
command = append(command, "--reusable")
}
if ephemeral {
command = append(command, "--ephemeral")
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,

View File

@@ -43,11 +43,11 @@ var (
"1.24.2",
"1.22.2",
"1.20.4",
"1.18.2",
}
// tailscaleVersionsUnavailable = []string{
// // These versions seem to fail when fetching from apt.
// "1.18.2",
// "1.16.2",
// "1.14.6",
// "1.12.4",
@@ -194,9 +194,9 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
return headscale, nil
}
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
func (s *Scenario) CreatePreAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error) {
if headscale, err := s.Headscale(); err == nil {
key, err := headscale.CreateAuthKey(namespace)
key, err := headscale.CreateAuthKey(namespace, reusable, ephemeral)
if err != nil {
return nil, fmt.Errorf("failed to create namespace: %w", err)
}
@@ -368,7 +368,7 @@ func (s *Scenario) CreateHeadscaleEnv(
return err
}
key, err := s.CreatePreAuthKey(namespaceName)
key, err := s.CreatePreAuthKey(namespaceName, true, false)
if err != nil {
return err
}
@@ -469,3 +469,19 @@ func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, er
return allFQDNs, nil
}
func (s *Scenario) WaitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}
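The TODO about error handling in WaitForTailscaleLogout could presumably be addressed with golang.org/x/sync/errgroup instead of the bare WaitGroup, which would also mean giving the method an error return; a sketch under that assumption:

var eg errgroup.Group
for _, client := range namespace.Clients {
	c := client
	eg.Go(func() error {
		return c.WaitForLogout()
	})
}
if err := eg.Wait(); err != nil {
	return fmt.Errorf("client failed to log out: %w", err)
}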

View File

@@ -62,7 +62,7 @@ func TestHeadscale(t *testing.T) {
})
t.Run("create-auth-key", func(t *testing.T) {
_, err := scenario.CreatePreAuthKey(namespace)
_, err := scenario.CreatePreAuthKey(namespace, true, false)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}
@@ -166,7 +166,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
})
t.Run("join-headscale", func(t *testing.T) {
key, err := scenario.CreatePreAuthKey(namespace)
key, err := scenario.CreatePreAuthKey(namespace, true, false)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}

View File

@@ -15,6 +15,7 @@ type TailscaleClient interface {
Execute(command []string) (string, string, error)
Up(loginServer, authKey string) error
UpWithLoginURL(loginServer string) (*url.URL, error)
Logout() error
IPs() ([]netip.Addr, error)
FQDN() (string, error)
Status() (*ipnstate.Status, error)

View File

@@ -270,6 +270,15 @@ func (t *TailscaleInContainer) UpWithLoginURL(
return loginURL, nil
}
func (t *TailscaleInContainer) Logout() error {
_, _, err := t.Execute([]string{"tailscale", "logout"})
if err != nil {
return err
}
return nil
}
func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
if t.ips != nil && len(t.ips) != 0 {
return t.ips, nil

View File

@@ -153,9 +153,15 @@ func (machine *Machine) isOnline() bool {
return machine.LastSeen.After(time.Now().Add(-keepAliveInterval))
}
// isEphemeral reports whether the machine is registered as an ephemeral node.
// https://tailscale.com/kb/1111/ephemeral-nodes/
func (machine *Machine) isEphemeral() bool {
return machine.AuthKey != nil && machine.AuthKey.Ephemeral
}
func containsAddresses(inputs []string, addrs []string) bool {
for _, addr := range addrs {
if contains(inputs, addr) {
if containsStr(inputs, addr) {
return true
}
}
@@ -188,6 +194,7 @@ func getFilteredByACLPeers(
peers := make(map[uint64]Machine)
// ACL-filter peers here. We iterate through machines in all namespaces and search the computed aclRules
// for a match between rule SrcIPs and DstPorts. If a rule matches, we allow the machine to be viewable.
machineIPs := machine.IPAddresses.ToStringSlice()
for _, peer := range machines {
if peer.ID == machine.ID {
continue
@@ -197,22 +204,23 @@ func getFilteredByACLPeers(
for _, d := range rule.DstPorts {
dst = append(dst, d.IP)
}
peerIPs := peer.IPAddresses.ToStringSlice()
if matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
machine.IPAddresses.ToStringSlice(),
peer.IPAddresses.ToStringSlice(),
machineIPs,
peerIPs,
) || // match source and destination
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
peer.IPAddresses.ToStringSlice(),
machine.IPAddresses.ToStringSlice(),
peerIPs,
machineIPs,
) || // match return path
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
machine.IPAddresses.ToStringSlice(),
machineIPs,
[]string{"*"},
) || // match source and all destination
matchSourceAndDestinationWithRule(
@@ -225,13 +233,13 @@ func getFilteredByACLPeers(
rule.SrcIPs,
dst,
[]string{"*"},
peer.IPAddresses.ToStringSlice(),
peerIPs,
) || // match source and all destination
matchSourceAndDestinationWithRule(
rule.SrcIPs,
dst,
[]string{"*"},
machine.IPAddresses.ToStringSlice(),
machineIPs,
) { // match all sources and source
peers[peer.ID] = peer
}
@@ -386,7 +394,7 @@ func (h *Headscale) GetMachineByGivenName(namespace string, givenName string) (*
// GetMachineByID finds a Machine by ID and returns the Machine struct.
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
if result := h.db.Preload("AuthKey").Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
return nil, result.Error
}
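The Preload("AuthKey") added to these getters matters for the ephemeral-node logic above: GORM leaves belongs-to associations nil unless preloaded, so without it machine.AuthKey would stay nil and isEphemeral() would always report false. A minimal sketch of the dependency, assuming standard GORM semantics:

var m Machine
if err := h.db.Preload("AuthKey").First(&m, id).Error; err != nil {
	return nil, err
}
// safe: AuthKey is populated here (or the machine genuinely has none)
if m.isEphemeral() {
	// ...
}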
@@ -398,7 +406,7 @@ func (h *Headscale) GetMachineByMachineKey(
machineKey key.MachinePublic,
) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
return nil, result.Error
}
@@ -410,7 +418,7 @@ func (h *Headscale) GetMachineByNodeKey(
nodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("Namespace").First(&machine, "node_key = ?",
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "node_key = ?",
NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
return nil, result.Error
}
@@ -423,7 +431,7 @@ func (h *Headscale) GetMachineByAnyKey(
machineKey key.MachinePublic, nodeKey key.NodePublic, oldNodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
MachinePublicKeyStripPrefix(machineKey),
NodePublicKeyStripPrefix(nodeKey),
NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {
@@ -683,7 +691,15 @@ func (h *Headscale) toNode(
}
primaryPrefixes := Routes(primaryRoutes).toPrefixes()
allowedIPs = append(allowedIPs, primaryPrefixes...)
machineRoutes, err := h.GetMachineRoutes(&machine)
if err != nil {
return nil, err
}
for _, route := range machineRoutes {
if route.Enabled && (route.IsPrimary || route.isExitRoute()) {
allowedIPs = append(allowedIPs, netip.Prefix(route.Prefix))
}
}
var derp string
if machine.HostInfo.NetInfo != nil {
@@ -844,6 +860,7 @@ func getTags(
func (h *Headscale) RegisterMachineFromAuthCallback(
nodeKeyStr string,
namespaceName string,
machineExpiry *time.Time,
registrationMethod string,
) (*Machine, error) {
nodeKey := key.NodePublic{}
@@ -856,6 +873,7 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
Str("nodeKey", nodeKey.ShortString()).
Str("namespaceName", namespaceName).
Str("registrationMethod", registrationMethod).
Str("expiresAt", fmt.Sprintf("%v", machineExpiry)).
Msg("Registering machine from API/CLI or auth callback")
if machineInterface, ok := h.registrationCache.Get(NodePublicKeyStripPrefix(nodeKey)); ok {
@@ -877,6 +895,10 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
registrationMachine.NamespaceID = namespace.ID
registrationMachine.RegisterMethod = registrationMethod
if machineExpiry != nil {
registrationMachine.Expiry = machineExpiry
}
machine, err := h.RegisterMachine(
registrationMachine,
)

oidc.go
View File

@@ -218,7 +218,7 @@ func (h *Headscale) OIDCCallback(
return
}
nodeKey, machineExists, err := h.validateMachineForOIDCCallback(writer, state, claims)
nodeKey, machineExists, err := h.validateMachineForOIDCCallback(writer, state, claims, idToken.Expiry)
if err != nil || machineExists {
return
}
@@ -236,7 +236,7 @@ func (h *Headscale) OIDCCallback(
return
}
if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey); err != nil {
if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey, idToken.Expiry); err != nil {
return
}
@@ -476,6 +476,7 @@ func (h *Headscale) validateMachineForOIDCCallback(
writer http.ResponseWriter,
state string,
claims *IDTokenClaims,
expiry time.Time,
) (*key.NodePublic, bool, error) {
// retrieve machinekey from state cache
nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
@@ -546,7 +547,7 @@ func (h *Headscale) validateMachineForOIDCCallback(
Str("machine", machine.Hostname).
Msg("machine already registered, reauthenticating")
err := h.RefreshMachine(machine, time.Time{})
err := h.RefreshMachine(machine, expiry)
if err != nil {
log.Error().
Caller().
@@ -560,6 +561,10 @@ func (h *Headscale) validateMachineForOIDCCallback(
return nil, true, err
}
log.Debug().
Str("machine", machine.Hostname).
Str("expiresAt", fmt.Sprintf("%v", expiry)).
Msg("successfully refreshed machine")
var content bytes.Buffer
if err := oidcCallbackTemplate.Execute(&content, oidcCallbackTemplateConfig{
@@ -679,10 +684,12 @@ func (h *Headscale) registerMachineForOIDCCallback(
writer http.ResponseWriter,
namespace *Namespace,
nodeKey *key.NodePublic,
expiry time.Time,
) error {
if _, err := h.RegisterMachineFromAuthCallback(
nodeKey.String(),
namespace.Name,
&expiry,
RegisterMethodOIDC,
); err != nil {
log.Error().

View File

@@ -255,7 +255,7 @@ func (h *Headscale) ApplePlatformConfig(
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write(
[]byte("Invalid platform, only ios and macos is supported"),
[]byte("Invalid platform. Only ios, macos-app-store and macos-standalone are supported"),
)
if err != nil {
log.Error().

View File

@@ -622,6 +622,20 @@ func (h *Headscale) handleMachineLogOutCommon(
Caller().
Err(err).
Msg("Failed to write response")
return
}
if machine.isEphemeral() {
err = h.HardDeleteMachine(&machine)
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Msg("Cannot delete ephemeral machine from the database")
}
return
}
log.Info().

View File

@@ -664,7 +664,11 @@ func (h *Headscale) scheduledPollWorker(
Str("machine", machine.Hostname).
Bool("noise", isNoise).
Msg("Sending keepalive")
keepAliveChan <- data
select {
case keepAliveChan <- data:
case <-ctx.Done():
return
}
case <-updateCheckerTicker.C:
log.Debug().
@@ -674,7 +678,11 @@ func (h *Headscale) scheduledPollWorker(
Msg("Sending update request")
updateRequestsFromNode.WithLabelValues(machine.Namespace.Name, machine.Hostname, "scheduled-update").
Inc()
updateChan <- struct{}{}
select {
case updateChan <- struct{}{}:
case <-ctx.Done():
return
}
}
}
}
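Both sends now sit in a select against ctx.Done(), so the worker exits instead of blocking forever on a channel whose receiver has gone away. The general shape of the pattern:

select {
case ch <- msg:
	// delivered to an active receiver
case <-ctx.Done():
	return // receiver is gone; stop instead of leaking this goroutine
}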

View File

@@ -6,6 +6,7 @@ import (
"gopkg.in/check.v1"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
func (s *Suite) TestGetRoutes(c *check.C) {
@@ -352,3 +353,102 @@ func (s *Suite) TestSubnetFailover(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
}
// TestAllowedIPRoutes tests that the AllowedIPs are correctly set for a node,
// including both the primary routes the node is responsible for, and the
// exit node routes if enabled.
func (s *Suite) TestAllowedIPRoutes(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("test", "test_enable_route_machine")
c.Assert(err, check.NotNil)
prefix, err := netip.ParsePrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)
prefix2, err := netip.ParsePrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)
prefixExitNodeV4, err := netip.ParsePrefix(
"0.0.0.0/0",
)
c.Assert(err, check.IsNil)
prefixExitNodeV6, err := netip.ParsePrefix(
"::/0",
)
c.Assert(err, check.IsNil)
hostInfo1 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2, prefixExitNodeV4, prefixExitNodeV6},
}
nodeKey := key.NewNode()
discoKey := key.NewDisco()
machineKey := key.NewMachine()
now := time.Now()
machine1 := Machine{
ID: 1,
MachineKey: MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: DiscoPublicKeyStripPrefix(discoKey.Public()),
Hostname: "test_enable_route_machine",
NamespaceID: namespace.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: HostInfo(hostInfo1),
LastSeen: &now,
}
app.db.Save(&machine1)
err = app.processMachineRoutes(&machine1)
c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefix.String())
c.Assert(err, check.IsNil)
// We intentionally do not enable this one, to test that it stays disabled
// err = app.EnableRoutes(&machine1, prefix2.String())
// c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefixExitNodeV4.String())
c.Assert(err, check.IsNil)
err = app.EnableRoutes(&machine1, prefixExitNodeV6.String())
c.Assert(err, check.IsNil)
err = app.handlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err := app.GetEnabledRoutes(&machine1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 3)
peer, err := app.toNode(machine1, "headscale.net", nil)
c.Assert(err, check.IsNil)
c.Assert(len(peer.AllowedIPs), check.Equals, 3)
foundExitNodeV4 := false
foundExitNodeV6 := false
for _, allowedIP := range peer.AllowedIPs {
if allowedIP == prefixExitNodeV4 {
foundExitNodeV4 = true
}
if allowedIP == prefixExitNodeV6 {
foundExitNodeV6 = true
}
}
c.Assert(foundExitNodeV4, check.Equals, true)
c.Assert(foundExitNodeV6, check.Equals, true)
}

View File

@@ -24,7 +24,7 @@
<li>
ALT + Click the Tailscale icon in the menu and hover over the Debug menu
</li>
<li>Under "Custm Login Server", select "Add Account..."</li>
<li>Under "Custom Login Server", select "Add Account..."</li>
<li>
Enter "{{.URL}}" of the headscale instance and press "Add Account"
</li>
@@ -68,7 +68,16 @@
<!--
<pre><code>curl {{.URL}}/apple/ios</code></pre>
-->
<pre><code>curl {{.URL}}/apple/macos</code></pre>
<ul>
<li>
For the App Store client:
<code>curl {{.URL}}/apple/macos-app-store</code>
</li>
<li>
For the standalone client:
<code>curl {{.URL}}/apple/macos-standalone</code>
</li>
</ul>
<h2>Profiles</h2>

View File

@@ -269,6 +269,16 @@ func stringToIPPrefix(prefixes []string) ([]netip.Prefix, error) {
return result, nil
}
func containsStr(ts []string, t string) bool {
for _, v := range ts {
if v == t {
return true
}
}
return false
}
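containsStr is the plain-string twin of the generic contains below, which compares via reflect.DeepEqual; on the hot ACL path above, switching to a direct == comparison is what removes the per-element reflection cost. Illustrative calls (values are made up):

containsStr(machineIPs, "100.64.0.1") // direct == per element
contains(prefixes, somePrefix)        // reflect.DeepEqual per element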
func contains[T string | netip.Prefix](ts []T, t T) bool {
for _, v := range ts {
if reflect.DeepEqual(v, t) {