Mirror of https://github.com/juanfont/headscale.git (synced 2025-08-15 10:27:48 +00:00)

Compare commits: fix-typo-s ... web-auth-f (29 commits)
Commits in this range (by SHA1):

70e08462b3, a231ece825, ae43d82a33, ad3c36fd07, f176503448, f7ad88aa08,
f63d22655c, 89c468fc43, b0fda6b216, 154fb59bdb, d3e9703fb5, 7ce3f8c7d1,
58c8633cc1, b3f5af30a4, 9f64ac8a33, aa1cc05cfb, 670ef9a93e, 987abcfdce,
c70f5696dc, 825e88311e, bbc8cb11da, 3a6ef6bece, b2dc480f22, 5d7eae46f8,
45cb0f3fa3, 658478cba3, ec90e9d716, 181f1eeb4f, e270cf6d20
80  .github/workflows/release.yml (vendored)

@@ -65,8 +65,8 @@ jobs:
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest
            type=sha
            type=raw,value=develop
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
@@ -125,13 +125,13 @@ jobs:
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            suffix=-debug,onlatest=true
            latest=false
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            type=raw,value=develop
            type=semver,pattern={{version}}-debug
            type=semver,pattern={{major}}.{{minor}}-debug
            type=semver,pattern={{major}}-debug
            type=raw,value=latest-debug
            type=sha,suffix=-debug
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
@@ -161,3 +161,69 @@ jobs:
        run: |
          rm -rf /tmp/.buildx-cache-debug
          mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug

  docker-alpine-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache-alpine
          key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-alpine-
      - name: Docker meta
        id: meta-alpine
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            latest=false
          tags: |
            type=semver,pattern={{version}}-alpine
            type=semver,pattern={{major}}.{{minor}}-alpine
            type=semver,pattern={{major}}-alpine
            type=raw,value=latest-alpine
            type=sha,suffix=-alpine
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          file: Dockerfile.alpine
          tags: ${{ steps.meta-alpine.outputs.tags }}
          labels: ${{ steps.meta-alpine.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache-alpine
          cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
          build-args: |
            VERSION=${{ steps.meta-alpine.outputs.version }}
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache-alpine
          mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine
27  .github/workflows/renovatebot.yml (vendored, new file)

@@ -0,0 +1,27 @@
---
name: Renovate
on:
  schedule:
    - cron: "* * 5,20 * *" # Every 5th and 20th of the month
  workflow_dispatch:
jobs:
  renovate:
    runs-on: ubuntu-latest
    steps:
      - name: Get token
        id: get_token
        uses: machine-learning-apps/actions-app-token@master
        with:
          APP_PEM: ${{ secrets.RENOVATEBOT_SECRET }}
          APP_ID: ${{ secrets.RENOVATEBOT_APP_ID }}

      - name: Checkout
        uses: actions/checkout@v3

      - name: Self-hosted Renovate
        uses: renovatebot/github-action@v31.81.3
        with:
          configurationFile: .github/renovate.json
          token: "x-access-token:${{ steps.get_token.outputs.app_token }}"
        # env:
        #   LOG_LEVEL: "debug"
35  .github/workflows/test-integration-oidc.yml (vendored, new file)

@@ -0,0 +1,35 @@
name: Integration Test OIDC

on: [pull_request]

jobs:
  integration-test-oidc:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 10

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v16
        if: steps.changed-files.outputs.any_changed == 'true'

      - name: Run OIDC integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: nix develop --command -- make test_integration_oidc
35  .github/workflows/test-integration-v2-general-auth.yml (vendored, new file)

@@ -0,0 +1,35 @@
name: Integration Test v2

on: [pull_request]

jobs:
  integration-test-v2:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 10

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v16
        if: steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: nix develop --command -- make test_integration_v2_auth_web_flow
20  CHANGELOG.md

@@ -1,27 +1,14 @@
# CHANGELOG

## 0.17.0 (2022-11-26)
## 0.17.0 (2022-XX-XX)

### BREAKING

- `noise.private_key_path` has been added and is required for the new noise protocol.
- Log level option `log_level` was moved to a distinct `log` config section and renamed to `level` [#768](https://github.com/juanfont/headscale/pull/768)
- Removed Alpine Linux container image [#962](https://github.com/juanfont/headscale/pull/962)

### Important Changes

- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)
- Add experimental support for [SSH ACL](https://tailscale.com/kb/1018/acls/#tailscale-ssh) (see docs for limitations) [#847](https://github.com/juanfont/headscale/pull/847)
  - Please note that this support should be considered _partially_ implemented
  - SSH ACLs status:
    - Support `accept` and `check` (SSH can be enabled and used for connecting and authentication)
    - Rejecting connections **is not supported**, meaning that if you enable SSH, you should assume that _all_ `ssh` connections **will be allowed**.
    - If you decide to try this feature, carefully manage permissions by blocking port `22` with regular ACLs, or do _not_ set `--ssh` on your clients (see the illustrative sketch after this list).
    - We are currently improving our testing of the SSH ACLs; help us get an overview by testing and giving feedback.
  - This feature should be considered dangerous and it is disabled by default. Enable it by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`.
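
To make the port-22 mitigation above concrete, here is a small, hypothetical Go sketch that builds a policy using the `ACLPolicy`, `ACL` and `SSH` shapes that appear later in this diff. The stand-in type definitions, group/tag names and ports are invented for illustration and are not taken from headscale's code:

```go
package main

import "fmt"

// Minimal stand-ins for the policy types shown in the acl_types.go hunk
// later in this diff; only the fields used below are included.
type ACL struct {
	Action       string
	Sources      []string
	Destinations []string
}

type SSH struct {
	Action       string
	Sources      []string
	Destinations []string
	Users        []string
}

type ACLPolicy struct {
	ACLs []ACL
	SSHs []SSH
}

func main() {
	// Hypothetical policy: the regular ACLs only open the ports that are
	// actually needed (and NOT *:22), while Tailscale SSH is granted to an
	// invented admin group towards an invented tag.
	policy := ACLPolicy{
		ACLs: []ACL{
			{Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:80,443"}},
		},
		SSHs: []SSH{
			{
				Action:       "accept",
				Sources:      []string{"group:admins"},
				Destinations: []string{"tag:prod"},
				Users:        []string{"autogroup:nonroot"},
			},
		},
	}
	fmt.Printf("%+v\n", policy)
}
```

With a policy along these lines, raw TCP connections to port 22 are simply not opened by the ACLs, while Tailscale SSH stays available to the admin group once the experimental flag is set.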

### Changes

- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)
- Add ability to specify config location via env var `HEADSCALE_CONFIG` [#674](https://github.com/juanfont/headscale/issues/674)
- Target Go 1.19 for Headscale [#778](https://github.com/juanfont/headscale/pull/778)
- Target Tailscale v1.30.0 to build Headscale [#780](https://github.com/juanfont/headscale/pull/780)
@@ -38,9 +25,6 @@
- Add `dns_config.override_local_dns` option [#905](https://github.com/juanfont/headscale/pull/905)
- Fix some DNS config issues [#660](https://github.com/juanfont/headscale/issues/660)
- Make it possible to disable TS2019 with build flag [#928](https://github.com/juanfont/headscale/pull/928)
- Fix OIDC registration issues [#960](https://github.com/juanfont/headscale/pull/960) and [#971](https://github.com/juanfont/headscale/pull/971)
- Add support for specifying NextDNS DNS-over-HTTPS resolver [#940](https://github.com/juanfont/headscale/pull/940)
- Make more sslmode available for postgresql connection [#927](https://github.com/juanfont/headscale/pull/927)

## 0.16.4 (2022-08-21)
24  Dockerfile.alpine (new file)

@@ -0,0 +1,24 @@
# Builder image
FROM docker.io/golang:1.19.0-alpine AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN apk add gcc musl-dev
RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

# Production image
FROM docker.io/alpine:latest

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
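
The `-ldflags "... -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION"` flag in the builder stage stamps the build argument into a package-level string at link time. A minimal, self-contained sketch of that mechanism, using a hypothetical `main.Version` variable rather than headscale's real `cli` package:

```go
package main

import "fmt"

// Version is overridden at link time, e.g.:
//   go build -ldflags "-s -w -X main.Version=v0.17.0" .
// Without the flag it keeps the default value below.
var Version = "dev"

func main() {
	fmt.Println("binary version:", Version)
}
```
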
@@ -4,16 +4,14 @@ ARG TAILSCALE_VERSION=*
ARG TAILSCALE_CHANNEL=stable

RUN apt-get update \
    && apt-get install -y gnupg curl ssh \
    && apt-get install -y gnupg curl \
    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
    && apt-get update \
    && apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
    && rm -rf /var/lib/apt/lists/*

RUN adduser --shell=/bin/bash ssh-it-user

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
@@ -1,10 +1,9 @@
FROM golang:latest

RUN apt-get update \
    && apt-get install -y ca-certificates dnsutils git iptables ssh \
    && apt-get install -y ca-certificates dnsutils git iptables \
    && rm -rf /var/lib/apt/lists/*

RUN useradd --shell=/bin/bash --create-home ssh-it-user

RUN git clone https://github.com/tailscale/tailscale.git

@@ -19,6 +18,6 @@ RUN cp tailscale /usr/local/bin/
RUN cp tailscaled /usr/local/bin/

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
23  Makefile

@@ -48,6 +48,16 @@ test_integration_derp:
		-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
		go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...

test_integration_oidc:
	docker network rm $$(docker network ls --filter name=headscale --quiet) || true
	docker network create headscale-test || true
	docker run -t --rm \
		--network headscale-test \
		-v ~/.cache/hs-integration-go:/go \
		-v $$PWD:$$PWD -w $$PWD \
		-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
		go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationOIDC ./...

test_integration_v2_general:
	docker run \
		-t --rm \
@@ -56,7 +66,18 @@ test_integration_v2_general:
		-v $$PWD:$$PWD -w $$PWD/integration \
		-v /var/run/docker.sock:/var/run/docker.sock \
		golang:1 \
		go test $(TAGS) -failfast ./... -timeout 120m -parallel 8
		go test $(TAGS) -failfast ./... -timeout 60m -parallel 6


test_integration_v2_auth_web_flow:
	docker run \
		-t --rm \
		-v ~/.cache/hs-integration-go:/go \
		--name headscale-test-suite \
		-v $$PWD:$$PWD -w $$PWD/integration \
		-v /var/run/docker.sock:/var/run/docker.sock \
		golang:1 \
		go test ./... -timeout 60m -parallel 6 -run TestAuthWebFlow

coverprofile_func:
	go tool cover -func=coverage.out
102  README.md

The only changes to README.md in this range are in the auto-generated contributors grid (the block of HTML `<td>`/`<a>`/`<img>` avatar cells that follows the `make build` section). Cells were added, removed, or reordered for: Mike Lloyd, Anton Schubert, Niek van der Maas, Eugen Biegler, Azz, Aaron Bieber, Fernando De Lucchi, Orville Q. Song, hdhoang, bravechamp, Mevan Samaratunga, Michael G., Paul Tötterman, Artem Klevtsov, Casey Marshall, LiuHanCheng, Silver Bullet, Steven Honson, Victor Freire, lachy2849, thomas, Abraham Ingersoll, Aofei Sheng, Arnar, Arthur Woimbée, Bryan Stenson, Carson Yang, Felix Kronlage-Dammers, Felix Yan, Jim Tittsler, Jonathan de Jong, Pierre Carru, Pontus N, Rasmus Moorats, rcursaru, Mend Renovate, Ryan Fowler, sophware, Tanner, Teteros, The Gitter Badger, Tjerk Woudsma, Yang Bin, Yujie Xia, Zakhar Bessarab, derelm, henning mueller, ignoramous, suhelen, sharkonet, manju-rn, pernila, and phpmalik. The per-cell avatar markup is omitted here.
123  acls.go

@@ -10,12 +10,10 @@ import (
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/tailscale/hujson"
	"gopkg.in/yaml.v3"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
)

@@ -56,8 +54,6 @@ const (
	ProtocolFC = 133 // Fibre Channel
)

var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")

// LoadACLPolicy loads the ACL policy from the specified path, and generates the ACL rules.
func (h *Headscale) LoadACLPolicy(path string) error {
	log.Debug().
@@ -124,20 +120,6 @@ func (h *Headscale) UpdateACLRules() error {
	log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
	h.aclRules = rules

	if featureEnableSSH() {
		sshRules, err := h.generateSSHRules()
		if err != nil {
			return err
		}
		log.Trace().Interface("SSH", sshRules).Msg("SSH rules generated")
		if h.sshPolicy == nil {
			h.sshPolicy = &tailcfg.SSHPolicy{}
		}
		h.sshPolicy.Rules = sshRules
	} else if h.aclPolicy != nil && len(h.aclPolicy.SSHs) > 0 {
		log.Info().Msg("SSH ACLs has been defined, but HEADSCALE_EXPERIMENTAL_FEATURE_SSH is not enabled, this is a unstable feature, check docs before activating")
	}

	return nil
}

@@ -205,111 +187,6 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
	return rules, nil
}

func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
	rules := []*tailcfg.SSHRule{}

	if h.aclPolicy == nil {
		return nil, errEmptyPolicy
	}

	machines, err := h.ListMachines()
	if err != nil {
		return nil, err
	}

	acceptAction := tailcfg.SSHAction{
		Message:                  "",
		Reject:                   false,
		Accept:                   true,
		SessionDuration:          0,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: true,
	}

	rejectAction := tailcfg.SSHAction{
		Message:                  "",
		Reject:                   true,
		Accept:                   false,
		SessionDuration:          0,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: false,
	}

	for index, sshACL := range h.aclPolicy.SSHs {
		action := rejectAction
		switch sshACL.Action {
		case "accept":
			action = acceptAction
		case "check":
			checkAction, err := sshCheckAction(sshACL.CheckPeriod)
			if err != nil {
				log.Error().
					Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
			} else {
				action = *checkAction
			}
		default:
			log.Error().
				Msgf("Error parsing SSH %d, unknown action '%s'", index, sshACL.Action)

			return nil, err
		}

		principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
		for innerIndex, rawSrc := range sshACL.Sources {
			expandedSrcs, err := expandAlias(
				machines,
				*h.aclPolicy,
				rawSrc,
				h.cfg.OIDC.StripEmaildomain,
			)
			if err != nil {
				log.Error().
					Msgf("Error parsing SSH %d, Source %d", index, innerIndex)

				return nil, err
			}
			for _, expandedSrc := range expandedSrcs {
				principals = append(principals, &tailcfg.SSHPrincipal{
					NodeIP: expandedSrc,
				})
			}
		}

		userMap := make(map[string]string, len(sshACL.Users))
		for _, user := range sshACL.Users {
			userMap[user] = "="
		}
		rules = append(rules, &tailcfg.SSHRule{
			RuleExpires: nil,
			Principals:  principals,
			SSHUsers:    userMap,
			Action:      &action,
		})
	}

	return rules, nil
}

func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
	sessionLength, err := time.ParseDuration(duration)
	if err != nil {
		return nil, err
	}

	return &tailcfg.SSHAction{
		Message:                  "",
		Reject:                   false,
		Accept:                   true,
		SessionDuration:          sessionLength,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: true,
	}, nil
}

func (h *Headscale) generateACLPolicySrcIP(
	machines []Machine,
	aclPolicy ACLPolicy,
76  acls_test.go

@@ -7,7 +7,6 @@ import (
	"testing"

	"gopkg.in/check.v1"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
)

@@ -74,81 +73,6 @@ func (s *Suite) TestInvalidAction(c *check.C) {
	c.Assert(errors.Is(err, errInvalidAction), check.Equals, true)
}

func (s *Suite) TestSshRules(c *check.C) {
	envknob.Setenv("HEADSCALE_EXPERIMENTAL_FEATURE_SSH", "1")

	namespace, err := app.CreateNamespace("user1")
	c.Assert(err, check.IsNil)

	pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = app.GetMachine("user1", "testmachine")
	c.Assert(err, check.NotNil)
	hostInfo := tailcfg.Hostinfo{
		OS:          "centos",
		Hostname:    "testmachine",
		RequestTags: []string{"tag:test"},
	}

	machine := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "testmachine",
		IPAddresses:    MachineAddresses{netip.MustParseAddr("100.64.0.1")},
		NamespaceID:    namespace.ID,
		RegisterMethod: RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		HostInfo:       HostInfo(hostInfo),
	}
	app.db.Save(&machine)

	app.aclPolicy = &ACLPolicy{
		Groups: Groups{
			"group:test": []string{"user1"},
		},
		Hosts: Hosts{
			"client": netip.PrefixFrom(netip.MustParseAddr("100.64.99.42"), 32),
		},
		ACLs: []ACL{
			{
				Action:       "accept",
				Sources:      []string{"*"},
				Destinations: []string{"*:*"},
			},
		},
		SSHs: []SSH{
			{
				Action:       "accept",
				Sources:      []string{"group:test"},
				Destinations: []string{"client"},
				Users:        []string{"autogroup:nonroot"},
			},
			{
				Action:       "accept",
				Sources:      []string{"*"},
				Destinations: []string{"client"},
				Users:        []string{"autogroup:nonroot"},
			},
		},
	}

	err = app.UpdateACLRules()

	c.Assert(err, check.IsNil)
	c.Assert(app.sshPolicy, check.NotNil)
	c.Assert(app.sshPolicy.Rules, check.HasLen, 2)
	c.Assert(app.sshPolicy.Rules[0].SSHUsers, check.HasLen, 1)
	c.Assert(app.sshPolicy.Rules[0].Principals, check.HasLen, 1)
	c.Assert(app.sshPolicy.Rules[0].Principals[0].NodeIP, check.Matches, "100.64.0.1")

	c.Assert(app.sshPolicy.Rules[1].SSHUsers, check.HasLen, 1)
	c.Assert(app.sshPolicy.Rules[1].Principals, check.HasLen, 1)
	c.Assert(app.sshPolicy.Rules[1].Principals[0].NodeIP, check.Matches, "*")
}

func (s *Suite) TestInvalidGroupInGroup(c *check.C) {
	// this ACL is wrong because the group in Sources sections doesn't exist
	app.aclPolicy = &ACLPolicy{
@@ -17,7 +17,6 @@ type ACLPolicy struct {
	ACLs []ACL `json:"acls" yaml:"acls"`
	Tests []ACLTest `json:"tests" yaml:"tests"`
	AutoApprovers AutoApprovers `json:"autoApprovers" yaml:"autoApprovers"`
	SSHs []SSH `json:"ssh" yaml:"ssh"`
}

// ACL is a basic rule for the ACL Policy.
@@ -51,15 +50,6 @@ type AutoApprovers struct {
	ExitNode []string `json:"exitNode" yaml:"exitNode"`
}

// SSH controls who can ssh into which machines.
type SSH struct {
	Action       string   `json:"action" yaml:"action"`
	Sources      []string `json:"src" yaml:"src"`
	Destinations []string `json:"dst" yaml:"dst"`
	Users        []string `json:"users" yaml:"users"`
	CheckPeriod  string   `json:"checkPeriod,omitempty" yaml:"checkPeriod,omitempty"`
}

// UnmarshalJSON allows to parse the Hosts directly into netip objects.
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
	newHosts := Hosts{}
@@ -35,7 +35,7 @@ func (h *Headscale) generateMapResponse(
		return nil, err
	}

	profiles := h.getMapResponseUserProfiles(*machine, peers)
	profiles := getMapResponseUserProfiles(*machine, peers)

	nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig)
	if err != nil {
@@ -62,7 +62,6 @@ func (h *Headscale) generateMapResponse(
		DNSConfig:    dnsConfig,
		Domain:       h.cfg.BaseDomain,
		PacketFilter: h.aclRules,
		SSHPolicy:    h.sshPolicy,
		DERPMap:      h.DERPMap,
		UserProfiles: profiles,
		Debug: &tailcfg.Debug{
36  app.go

@@ -11,7 +11,6 @@ import (
	"os"
	"os/signal"
	"sort"
	"strconv"
	"strings"
	"sync"
	"syscall"
@@ -88,7 +87,6 @@ type Headscale struct {

	aclPolicy *ACLPolicy
	aclRules  []tailcfg.FilterRule
	sshPolicy *tailcfg.SSHPolicy

	lastStateChange *xsync.MapOf[string, time.Time]

@@ -103,6 +101,27 @@ type Headscale struct {
	pollNetMapStreamWG sync.WaitGroup
}

// Look up the TLS constant relative to user-supplied TLS client
// authentication mode. If an unknown mode is supplied, the default
// value, tls.RequireAnyClientCert, is returned. The returned boolean
// indicates if the supplied mode was valid.
func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
	switch mode {
	case DisabledClientAuth:
		// Client cert is _not_ required.
		return tls.NoClientCert, true
	case RelaxedClientAuth:
		// Client cert required, but _not verified_.
		return tls.RequireAnyClientCert, true
	case EnforcedClientAuth:
		// Client cert is _required and verified_.
		return tls.RequireAndVerifyClientCert, true
	default:
		// Return the default when an unknown value is supplied.
		return tls.RequireAnyClientCert, false
	}
}

func NewHeadscale(cfg *Config) (*Headscale, error) {
	privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
	if err != nil {
@@ -129,12 +148,8 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
		cfg.DBuser,
	)

	if sslEnabled, err := strconv.ParseBool(cfg.DBssl); err == nil {
		if !sslEnabled {
			dbString += " sslmode=disable"
		}
	} else {
		dbString += fmt.Sprintf(" sslmode=%s", cfg.DBssl)
	if !cfg.DBssl {
		dbString += " sslmode=disable"
	}

	if cfg.DBport != 0 {
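
As a side note on the two `db_ssl` variants in the hunk above: the string-based branch passes any non-boolean value straight through as a libpq `sslmode`. A small, hypothetical sketch that mirrors that logic for a few sample values (the connection fields are made up and this helper is not part of headscale):

```go
package main

import (
	"fmt"
	"strconv"
)

// buildDSN mirrors the string-based db_ssl handling shown above: a parseable
// boolean toggles sslmode=disable, anything else is appended verbatim as a
// literal sslmode value (e.g. "verify-full").
func buildDSN(host, name, user, ssl string) string {
	dsn := fmt.Sprintf("host=%s dbname=%s user=%s", host, name, user)
	if sslEnabled, err := strconv.ParseBool(ssl); err == nil {
		if !sslEnabled {
			dsn += " sslmode=disable"
		}
	} else {
		dsn += fmt.Sprintf(" sslmode=%s", ssl)
	}
	return dsn
}

func main() {
	for _, ssl := range []string{"false", "true", "verify-full"} {
		fmt.Println(buildDSN("localhost", "headscale", "foo", ssl))
	}
}
```

Running it prints no `sslmode` for `true`, `sslmode=disable` for `false`, and `sslmode=verify-full` passed through verbatim.
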
@@ -840,7 +855,12 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
		log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
	}

	log.Info().Msg(fmt.Sprintf(
		"Client authentication (mTLS) is \"%s\". See the docs to learn about configuring this setting.",
		h.cfg.TLS.ClientAuthMode))

	tlsConfig := &tls.Config{
		ClientAuth:   h.cfg.TLS.ClientAuthMode,
		NextProtos:   []string{"http/1.1"},
		Certificates: make([]tls.Certificate, 1),
		MinVersion:   tls.VersionTLS12,
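
Since `getTLSSettings` above wires `ClientAuth` from `tls_client_auth_mode`, a client talking to a server running in `relaxed` or `enforced` mode has to present a certificate during the TLS handshake. A minimal, hypothetical Go client sketch — the file paths and URL are placeholders, and this snippet is not shipped by headscale:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder paths: a client certificate/key pair signed by a CA the
	// server trusts (verification only happens in "enforced" mode).
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				MinVersion:   tls.VersionTLS12,
			},
		},
	}

	// Placeholder URL for a headscale server running with TLS enabled.
	resp, err := client.Get("https://headscale.example.com/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```
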
17  app_test.go

@@ -59,3 +59,20 @@ func (s *Suite) ResetDB(c *check.C) {
	}
	app.db = db
}

// Ensure an error is returned when an invalid auth mode
// is supplied.
func (s *Suite) TestInvalidClientAuthMode(c *check.C) {
	_, isValid := LookupTLSClientAuthMode("invalid")
	c.Assert(isValid, check.Equals, false)
}

// Ensure that all client auth modes return a nil error.
func (s *Suite) TestAuthModes(c *check.C) {
	modes := []string{"disabled", "relaxed", "enforced"}

	for _, v := range modes {
		_, isValid := LookupTLSClientAuthMode(v)
		c.Assert(isValid, check.Equals, true)
	}
}
@@ -134,9 +134,7 @@ var registerNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(
			response.Machine,
			fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
		SuccessOutput(response.Machine, fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
	},
}
@@ -15,7 +15,7 @@ import (
var cfgFile string = ""

func init() {
	if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
	if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc") {
		return
	}
@@ -55,10 +55,10 @@ func (*Suite) TestConfigFileLoading(c *check.C) {

	// Test that config file was interpreted correctly
	c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
	c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
	c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
	c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
	c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
	c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
	c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
@@ -98,10 +98,10 @@ func (*Suite) TestConfigLoading(c *check.C) {

	// Test that config file was interpreted correctly
	c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
	c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
	c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
	c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
	c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
	c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
	c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
@@ -14,9 +14,7 @@ server_url: http://127.0.0.1:8080

# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: 127.0.0.1:8080
listen_addr: 0.0.0.0:8080

# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
@@ -29,10 +27,7 @@ metrics_listen_addr: 127.0.0.1:9090
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
grpc_listen_addr: 0.0.0.0:50443

# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
@@ -43,10 +38,7 @@ grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
# For production:
# /var/lib/headscale/private.key
private_key_path: ./private.key
private_key_path: /var/lib/headscale/private.key

# The Noise section includes specific configuration for the
# TS2021 Noise protocol
@@ -55,10 +47,7 @@ noise:
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. It must be different
# from the legacy private key.
#
# For production:
# private_key_path: /var/lib/headscale/noise_private.key
private_key_path: ./noise_private.key
private_key_path: /var/lib/headscale/noise_private.key

# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
@@ -130,10 +119,7 @@ node_update_check_interval: 10s

# SQLite config
db_type: sqlite3

# For production:
# db_path: /var/lib/headscale/db.sqlite
db_path: ./db.sqlite
db_path: /var/lib/headscale/db.sqlite

# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
@@ -143,9 +129,6 @@ db_path: ./db.sqlite
# db_name: headscale
# db_user: foo
# db_pass: bar

# If another 'sslmode' is required instead of 'require' (true) and 'disable' (false), set the 'sslmode' you need
# in the 'db_ssl' field. Refer to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false

### TLS configuration
@@ -164,11 +147,16 @@ acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""

# Client (Tailscale/Browser) authentication mode (mTLS)
# Acceptable values:
# - disabled: client authentication disabled
# - relaxed: client certificate is required but not verified
# - enforced: client certificate is required and verified
tls_client_auth_mode: relaxed

# Path to store certificates and metadata needed by
# letsencrypt
# For production:
# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
tls_letsencrypt_cache_dir: ./cache
tls_letsencrypt_cache_dir: /var/lib/headscale/cache

# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
@@ -210,18 +198,6 @@ dns_config:
  nameservers:
    - 1.1.1.1

  # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
  # "abc123" is an example NextDNS ID, replace it with yours.
  #
  # With metadata sharing:
  # nameservers:
  #   - https://dns.nextdns.io/abc123
  #
  # Without metadata sharing:
  # nameservers:
  #   - 2a07:a8c0::ab:c123
  #   - 2a07:a8c1::ab:c123

  # Split DNS (see https://tailscale.com/kb/1054/dns/),
  # list of search domains and the DNS to query for each one.
  #
@@ -246,9 +222,9 @@ dns_config:
  base_domain: example.com

# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
# unix_socket: /var/run/headscale.sock
unix_socket: ./headscale.sock
# Note: for local development, you probably want to change this to:
# unix_socket: ./headscale.sock
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
75  config.go

@@ -1,6 +1,7 @@
package headscale

import (
	"crypto/tls"
	"errors"
	"fmt"
	"io/fs"
@@ -51,7 +52,7 @@ type Config struct {
	DBname string
	DBuser string
	DBpass string
	DBssl  string
	DBssl  bool

	TLS TLSConfig

@@ -74,8 +75,9 @@ type Config struct {
}

type TLSConfig struct {
	CertPath string
	KeyPath  string
	CertPath       string
	KeyPath        string
	ClientAuthMode tls.ClientAuthType

	LetsEncrypt LetsEncryptConfig
}
@@ -152,6 +154,7 @@ func LoadConfig(path string, isFile bool) error {

	viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
	viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
	viper.SetDefault("tls_client_auth_mode", "relaxed")

	viper.SetDefault("log.level", "info")
	viper.SetDefault("log.format", TextLogFormat)
@@ -182,10 +185,6 @@ func LoadConfig(path string, isFile bool) error {

	viper.SetDefault("node_update_check_interval", "10s")

	if IsCLIConfigured() {
		return nil
	}

	if err := viper.ReadInConfig(); err != nil {
		log.Warn().Err(err).Msg("Failed to read configuration from disk")

@@ -221,6 +220,19 @@ func LoadConfig(path string, isFile bool) error {
		errorText += "Fatal config error: server_url must start with https:// or http://\n"
	}

	_, authModeValid := LookupTLSClientAuthMode(
		viper.GetString("tls_client_auth_mode"),
	)

	if !authModeValid {
		errorText += fmt.Sprintf(
			"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
			viper.GetString("tls_client_auth_mode"),
			DisabledClientAuth,
			RelaxedClientAuth,
			EnforcedClientAuth)
	}

	// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
	// to avoid races
	minInactivityTimeout, _ := time.ParseDuration("65s")
@@ -250,6 +262,10 @@ func LoadConfig(path string, isFile bool) error {
}

func GetTLSConfig() TLSConfig {
	tlsClientAuthMode, _ := LookupTLSClientAuthMode(
		viper.GetString("tls_client_auth_mode"),
	)

	return TLSConfig{
		LetsEncrypt: LetsEncryptConfig{
			Hostname: viper.GetString("tls_letsencrypt_hostname"),
@@ -265,6 +281,7 @@ func GetTLSConfig() TLSConfig {
		KeyPath: AbsolutePathFromConfigPath(
			viper.GetString("tls_key_path"),
		),
		ClientAuthMode: tlsClientAuthMode,
	}
}

@@ -366,21 +383,10 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
	if viper.IsSet("dns_config.nameservers") {
		nameserversStr := viper.GetStringSlice("dns_config.nameservers")

		nameservers := []netip.Addr{}
		resolvers := []*dnstype.Resolver{}
		nameservers := make([]netip.Addr, len(nameserversStr))
		resolvers := make([]*dnstype.Resolver, len(nameserversStr))

		for _, nameserverStr := range nameserversStr {
			// Search for explicit DNS-over-HTTPS resolvers
			if strings.HasPrefix(nameserverStr, "https://") {
				resolvers = append(resolvers, &dnstype.Resolver{
					Addr: nameserverStr,
				})

				// This nameserver can not be parsed as an IP address
				continue
			}

			// Parse nameserver as a regular IP
		for index, nameserverStr := range nameserversStr {
			nameserver, err := netip.ParseAddr(nameserverStr)
			if err != nil {
				log.Error().
@@ -389,10 +395,10 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
					Msgf("Could not parse nameserver IP: %s", nameserverStr)
			}

			nameservers = append(nameservers, nameserver)
			resolvers = append(resolvers, &dnstype.Resolver{
			nameservers[index] = nameserver
			resolvers[index] = &dnstype.Resolver{
				Addr: nameserver.String(),
			})
		}
	}

	dnsConfig.Nameservers = nameservers
@@ -463,17 +469,6 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}

func GetHeadscaleConfig() (*Config, error) {
	if IsCLIConfigured() {
		return &Config{
			CLI: CLIConfig{
				Address:  viper.GetString("cli.address"),
				APIKey:   viper.GetString("cli.api_key"),
				Timeout:  viper.GetDuration("cli.timeout"),
				Insecure: viper.GetBool("cli.insecure"),
			},
		}, nil
	}

	dnsConfig, baseDomain := GetDNSConfig()
	derpConfig := GetDERPConfig()
	logConfig := GetLogTailConfig()
@@ -545,7 +540,7 @@ func GetHeadscaleConfig() (*Config, error) {
		DBname: viper.GetString("db_name"),
		DBuser: viper.GetString("db_user"),
		DBpass: viper.GetString("db_pass"),
		DBssl:  viper.GetString("db_ssl"),
		DBssl:  viper.GetBool("db_ssl"),

		TLS: GetTLSConfig(),

@@ -574,8 +569,6 @@ func GetHeadscaleConfig() (*Config, error) {
		LogTail:             logConfig,
		RandomizeClientPort: randomizeClientPort,

		ACL: GetACLConfig(),

		CLI: CLIConfig{
			Address: viper.GetString("cli.address"),
			APIKey:  viper.GetString("cli.api_key"),
@@ -583,10 +576,8 @@ func GetHeadscaleConfig() (*Config, error) {
			Insecure: viper.GetBool("cli.insecure"),
		},

		ACL: GetACLConfig(),

		Log: GetLogConfig(),
	}, nil
}

func IsCLIConfigured() bool {
	return viper.GetString("cli.address") != "" && viper.GetString("cli.api_key") != ""
}
35  dns.go

@@ -3,13 +3,11 @@ package headscale
import (
	"fmt"
	"net/netip"
	"net/url"
	"strings"

	mapset "github.com/deckarep/golang-set/v2"
	"go4.org/netipx"
	"tailscale.com/tailcfg"
	"tailscale.com/types/dnstype"
	"tailscale.com/util/dnsname"
)

@@ -22,10 +20,6 @@ const (
	ipv6AddressLength = 128
)

const (
	nextDNSDoHPrefix = "https://dns.nextdns.io"
)

// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
// server (listening in 100.100.100.100 udp/53) should be used for.
@@ -158,39 +152,16 @@ func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
	return fqdns
}

// If any nextdns DoH resolvers are present in the list of resolvers it will
// take metadata from the machine metadata and instruct tailscale to add it
// to the requests. This makes it possible to identify from which device the
// requests come in the NextDNS dashboard.
//
// This will produce a resolver like:
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine Machine) {
	for _, resolver := range resolvers {
		if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
			attrs := url.Values{
				"device_name":  []string{machine.Hostname},
				"device_model": []string{machine.HostInfo.OS},
			}

			if len(machine.IPAddresses) > 0 {
				attrs.Add("device_ip", machine.IPAddresses[0].String())
			}

			resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
		}
	}
}
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
) *tailcfg.DNSConfig {
|
||||
var dnsConfig *tailcfg.DNSConfig = dnsConfigOrig.Clone()
|
||||
var dnsConfig *tailcfg.DNSConfig
|
||||
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
|
||||
// Only inject the Search Domain of the current namespace - shared nodes should use their full FQDN
|
||||
dnsConfig = dnsConfigOrig.Clone()
|
||||
dnsConfig.Domains = append(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf(
|
||||
@@ -213,7 +184,5 @@ func getMapResponseDNSConfig(
|
||||
dnsConfig = dnsConfigOrig
|
||||
}
|
||||
|
||||
addNextDNSMetadata(dnsConfig.Resolvers, machine)
|
||||
|
||||
return dnsConfig
|
||||
}
|
||||
|
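The `addNextDNSMetadata` comment above describes appending device metadata to a NextDNS DoH resolver address. As a tiny standalone illustration of the same `url.Values` pattern, the snippet below reproduces the quoted URL shape; the profile ID, hostname, and IP are made up for the example.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	resolverAddr := "https://dns.nextdns.io/abc123" // hypothetical NextDNS profile ID

	attrs := url.Values{
		"device_name":  []string{"node-name"},
		"device_model": []string{"linux"},
	}
	attrs.Add("device_ip", "100.64.0.1")

	// Encode() sorts keys, so this prints something like the URL quoted above:
	// https://dns.nextdns.io/abc123?device_ip=100.64.0.1&device_model=linux&device_name=node-name
	fmt.Printf("%s?%s\n", resolverAddr, attrs.Encode())
}
```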
@@ -1,48 +0,0 @@
# Better route management

As of today, route management in Headscale is very basic and does not allow for much flexibility, including implementing subnet HA, 4via6 or more advanced features. We also have a number of bugs (e.g., routes exposed by ephemeral nodes).

This proposal aims to improve route management.

## Current situation

Routes advertised by the nodes are read from the Hostinfo struct. If approved from the CLI or via autoApprovers, the route is added to the EnabledRoutes field in `Machine`.

This means that the advertised routes are not persisted in the database, as Hostinfo is always replaced. In the same way, EnabledRoutes can get out of sync with the actual routes in the node.

In case of colliding routes (i.e., subnets that are exposed from multiple nodes), we currently just send all of them in `PrimaryRoutes`... and hope for the best. (`PrimaryRoutes` is the field in `Node` used for subnet failover.)

## Proposal

The core part is to create a new `Route` struct (and DB table) with the following fields:

```go
type Route struct {
	ID uint64 `gorm:"primary_key"`

	Machine *Machine
	Prefix  IPPrefix

	Advertised bool
	Enabled    bool
	IsPrimary  bool

	CreatedAt *time.Time
	UpdatedAt *time.Time
	DeletedAt *time.Time
}
```

- The `Advertised` field is set to true if the route is being advertised by the node. It is set to false if the route is removed. This way we can indicate that a route which was enabled has since stopped being advertised. A similar behaviour happens in the Tailscale.com control panel.

- The `Enabled` field is set to true if the route is enabled - via CLI or autoApprovers.

- `IsPrimary` indicates whether Headscale has selected this route as the primary route for that particular subnet. This allows us to implement subnet failover. This would be fully automatic if there is more than one subnet router advertising the same network - which is the behaviour of Tailscale.com.

## Stuff to bear in mind

- We need to make sure to migrate the current `EnabledRoutes` of `Machine` into the new table.
- When a node stops sharing a subnet, I reckon we should mark it both as not `Advertised` and not `Enabled`. Users should re-enable it if the node advertises it again.
- If only one subnet router is advertising a subnet, we should mark it as primary.
- Regarding subnet failover, the current behaviour of Tailscale.com is to perform the failover 15 seconds after the node disconnects from their control panel. I reckon we cannot do the same currently; our maximum granularity is the keepalive period (see the sketch after this list).
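To make the failover idea above concrete, here is a minimal sketch of how primary selection over the proposed `Route` table could look. It is an illustration only: `selectPrimaryRoute` and the `isOnline` callback are hypothetical, and the fields used are the ones from the proposed struct, not an existing Headscale API.

```go
// selectPrimaryRoute decides which Route for a single prefix should be
// primary. isOnline is a caller-supplied liveness check (for example,
// "last seen within one keepalive interval"); it is a stand-in, not an
// existing Headscale helper.
func selectPrimaryRoute(candidates []*Route, isOnline func(*Machine) bool) *Route {
	// Prefer the current primary to avoid flapping while its node is healthy.
	for _, route := range candidates {
		if route.IsPrimary && route.Enabled && route.Advertised && isOnline(route.Machine) {
			return route
		}
	}

	// Otherwise fail over to any other enabled, advertised route whose node is online.
	for _, route := range candidates {
		if route.Enabled && route.Advertised && isOnline(route.Machine) {
			return route
		}
	}

	// No usable subnet router for this prefix right now.
	return nil
}
```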
@@ -58,7 +58,6 @@ private_key_path: /etc/headscale/private.key
noise:
  private_key_path: /etc/headscale/noise_private.key
# The default /var/lib/headscale path is not writable in the container
db_type: sqlite3
db_path: /etc/headscale/db.sqlite
```
14 docs/tls.md
@@ -29,3 +29,17 @@ headscale can also be configured to expose its web service via TLS. To configure
tls_cert_path: ""
tls_key_path: ""
```

### Configuring Mutual TLS Authentication (mTLS)

mTLS is a method by which an HTTPS server authenticates clients, e.g. Tailscale, using TLS certificates. This can be configured by applying one of the following values to the `tls_client_auth_mode` setting in the configuration file.

| Value               | Behavior                                                   |
| ------------------- | ---------------------------------------------------------- |
| `disabled`          | Disable mTLS.                                              |
| `relaxed` (default) | A client certificate is required, but it is not verified.  |
| `enforced`          | Requires clients to supply a certificate that is verified. |

```yaml
tls_client_auth_mode: ""
```
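As an example of the values listed in the table above, a deployment that should only accept verified client certificates would set the option like this (illustrative snippet, using the setting shown above):

```yaml
tls_client_auth_mode: "enforced"
```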
@@ -1,302 +0,0 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
const (
|
||||
dockerContextPath = "../."
|
||||
hsicOIDCMockHashLength = 6
|
||||
oidcServerPort = 10000
|
||||
)
|
||||
|
||||
var errStatusCodeNotOK = errors.New("status code not OK")
|
||||
|
||||
type AuthOIDCScenario struct {
|
||||
*Scenario
|
||||
|
||||
mockOIDC *dockertest.Resource
|
||||
}
|
||||
|
||||
func TestOIDCAuthenticationPingAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
scenario := AuthOIDCScenario{
|
||||
Scenario: baseScenario,
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
oidcConfig, err := scenario.runMockOIDC()
|
||||
if err != nil {
|
||||
t.Errorf("failed to run mock OIDC server: %s", err)
|
||||
}
|
||||
|
||||
oidcMap := map[string]string{
|
||||
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
|
||||
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
|
||||
"HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret,
|
||||
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(
|
||||
spec,
|
||||
hsic.WithTestName("oidcauthping"),
|
||||
hsic.WithConfigEnv(oidcMap),
|
||||
hsic.WithHostnameAsServerURL(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
allIps, err := scenario.ListTailscaleClientsIPs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
success := 0
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, ip := range allIps {
|
||||
err := client.Ping(ip.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
|
||||
} else {
|
||||
success++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for namespaceName, clientCount := range namespaces {
|
||||
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
|
||||
err = s.CreateNamespace(namespaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
|
||||
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)
|
||||
|
||||
hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
|
||||
|
||||
mockOidcOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Cmd: []string{"headscale", "mockoidc"},
|
||||
ExposedPorts: []string{"10000/tcp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"10000/tcp": {{HostPort: "10000"}},
|
||||
},
|
||||
Networks: []*dockertest.Network{s.Scenario.network},
|
||||
Env: []string{
|
||||
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
|
||||
"MOCKOIDC_PORT=10000",
|
||||
"MOCKOIDC_CLIENT_ID=superclient",
|
||||
"MOCKOIDC_CLIENT_SECRET=supersecret",
|
||||
},
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: dockerContextPath,
|
||||
}
|
||||
|
||||
err := s.pool.RemoveContainerByName(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
mockOidcOptions,
|
||||
dockertestutil.DockerRestartPolicy); err == nil {
|
||||
s.mockOIDC = pmockoidc
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale mock oidc to be ready for tests")
|
||||
hostEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.mockOIDC.GetIPInNetwork(s.network),
|
||||
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
|
||||
httpClient := &http.Client{}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, oidcConfigURL, nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("headscale mock OIDC tests is not ready: %s\n", err)
|
||||
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errStatusCodeNotOK
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)
|
||||
|
||||
return &headscale.OIDCConfig{
|
||||
Issuer: fmt.Sprintf("http://%s/oidc",
|
||||
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
|
||||
ClientID: "superclient",
|
||||
ClientSecret: "supersecret",
|
||||
StripEmaildomain: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) runTailscaleUp(
|
||||
namespaceStr, loginServer string,
|
||||
) error {
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("running tailscale up for namespace %s", namespaceStr)
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for _, client := range namespace.Clients {
|
||||
namespace.joinWaitGroup.Add(1)
|
||||
|
||||
go func(c TailscaleClient) {
|
||||
defer namespace.joinWaitGroup.Done()
|
||||
|
||||
// TODO(juanfont): error handle this
|
||||
loginURL, err := c.UpWithLoginURL(loginServer)
|
||||
if err != nil {
|
||||
log.Printf("failed to run tailscale up: %s", err)
|
||||
}
|
||||
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
|
||||
loginURL.Scheme = "http"
|
||||
|
||||
insecureTransport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
|
||||
}
|
||||
|
||||
log.Printf("%s login url: %s\n", c.Hostname(), loginURL.String())
|
||||
|
||||
httpClient := &http.Client{Transport: insecureTransport}
|
||||
ctx := context.Background()
|
||||
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to get login url %s: %s", c.Hostname(), loginURL, err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
_, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("%s failed to read response body: %s", c.Hostname(), err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("Finished request for %s to join tailnet", c.Hostname())
|
||||
}(client)
|
||||
|
||||
err = client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
|
||||
log.Printf("client %s is ready", client.Hostname())
|
||||
}
|
||||
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
|
||||
}
|
||||
|
||||
func (s *AuthOIDCScenario) Shutdown() error {
|
||||
err := s.pool.Purge(s.mockOIDC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.Scenario.Shutdown()
|
||||
}
|
@@ -10,8 +10,6 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
)
|
||||
|
||||
var errParseAuthPage = errors.New("failed to parse auth page")
|
||||
@@ -22,7 +20,6 @@ type AuthWebFlowScenario struct {
|
||||
|
||||
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
baseScenario, err := NewScenario()
|
||||
if err != nil {
|
||||
@@ -38,7 +35,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("webauthping"))
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
@@ -79,16 +76,13 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
func (s *AuthWebFlowScenario) CreateHeadscaleEnv(namespaces map[string]int) error {
|
||||
err := s.StartHeadscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
err = s.Headscale().WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -105,7 +99,7 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
|
||||
err = s.runTailscaleUp(namespaceName, s.Headscale().GetEndpoint())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -135,12 +129,12 @@ func (s *AuthWebFlowScenario) runTailscaleUp(
|
||||
if err != nil {
|
||||
log.Printf("failed to register client: %s", err)
|
||||
}
|
||||
}(client)
|
||||
|
||||
err := client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
err = c.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", c.Hostname(), err)
|
||||
}
|
||||
}(client)
|
||||
}
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
@@ -151,13 +145,8 @@ func (s *AuthWebFlowScenario) runTailscaleUp(
|
||||
}
|
||||
|
||||
func (s *AuthWebFlowScenario) runHeadscaleRegister(namespaceStr string, loginURL *url.URL) error {
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("loginURL: %s", loginURL)
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
|
||||
loginURL.Host = fmt.Sprintf("%s:8080", s.Headscale().GetIP())
|
||||
loginURL.Scheme = "http"
|
||||
|
||||
httpClient := &http.Client{}
|
||||
@@ -188,10 +177,8 @@ func (s *AuthWebFlowScenario) runHeadscaleRegister(namespaceStr string, loginURL
|
||||
key := keySep[1]
|
||||
log.Printf("registering node %s", key)
|
||||
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
_, err = headscale.Execute(
|
||||
[]string{"headscale", "-n", namespaceStr, "nodes", "register", "--key", key},
|
||||
)
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
_, err = headscale.Execute([]string{"headscale", "-n", namespaceStr, "nodes", "register", "--key", key})
|
||||
if err != nil {
|
||||
log.Printf("failed to register node: %s", err)
|
||||
|
||||
|
@@ -7,8 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -38,14 +36,11 @@ func TestNamespaceCommand(t *testing.T) {
|
||||
"namespace2": 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(headscale,
|
||||
err = executeAndUnmarshal(scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
@@ -66,7 +61,7 @@ func TestNamespaceCommand(t *testing.T) {
|
||||
result,
|
||||
)
|
||||
|
||||
_, err = headscale.Execute(
|
||||
_, err = scenario.Headscale().Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
@@ -80,7 +75,7 @@ func TestNamespaceCommand(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listAfterRenameNamespaces []v1.Namespace
|
||||
err = executeAndUnmarshal(headscale,
|
||||
err = executeAndUnmarshal(scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"namespaces",
|
||||
@@ -119,10 +114,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
keys := make([]*v1.PreAuthKey, count)
|
||||
@@ -131,7 +123,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
for index := 0; index < count; index++ {
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err := executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -157,7 +149,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -210,7 +202,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test key expiry
|
||||
_, err = headscale.Execute(
|
||||
_, err = scenario.Headscale().Execute(
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -224,7 +216,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
|
||||
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -259,15 +251,12 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -284,7 +273,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -324,15 +313,12 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
namespace: 0,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var preAuthReusableKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -349,7 +335,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
|
||||
var preAuthEphemeralKey v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
@@ -369,7 +355,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
|
||||
var listedPreAuthKeys []v1.PreAuthKey
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
scenario.Headscale(),
|
||||
[]string{
|
||||
"headscale",
|
||||
"preauthkeys",
|
||||
|
@@ -13,7 +13,4 @@ type ControlServer interface {
CreateNamespace(namespace string) error
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
GetCert() []byte
GetHostname() string
GetIP() string
}
@@ -6,14 +6,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func TestPingAllByIP(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
@@ -25,7 +22,7 @@ func TestPingAllByIP(t *testing.T) {
|
||||
"namespace2": len(TailscaleVersions),
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
@@ -68,7 +65,6 @@ func TestPingAllByIP(t *testing.T) {
|
||||
|
||||
func TestPingAllByHostname(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
@@ -81,7 +77,7 @@ func TestPingAllByHostname(t *testing.T) {
|
||||
"namespace4": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
@@ -122,13 +118,8 @@ func TestPingAllByHostname(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestTaildrop(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
retry := func(times int, sleepInverval time.Duration, doWork func() error) error {
|
||||
var err error
|
||||
@@ -153,7 +144,7 @@ func TestTaildrop(t *testing.T) {
|
||||
"taildrop": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop"))
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
@@ -268,7 +259,6 @@ func TestTaildrop(t *testing.T) {
|
||||
|
||||
func TestResolveMagicDNS(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
@@ -281,7 +271,7 @@ func TestResolveMagicDNS(t *testing.T) {
|
||||
"magicdns2": len(TailscaleVersions) - 1,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
|
||||
err = scenario.CreateHeadscaleEnv(spec)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
@@ -1,118 +1,67 @@
|
||||
package hsic
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
const (
|
||||
hsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
aclPolicyPath = "/etc/headscale/acl.hujson"
|
||||
tlsCertPath = "/etc/headscale/tls.cert"
|
||||
tlsKeyPath = "/etc/headscale/tls.key"
|
||||
headscaleDefaultPort = 8080
|
||||
hsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
aclPolicyPath = "/etc/headscale/acl.hujson"
|
||||
)
|
||||
|
||||
var errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
|
||||
|
||||
type HeadscaleInContainer struct {
|
||||
hostname string
|
||||
port int
|
||||
|
||||
pool *dockertest.Pool
|
||||
container *dockertest.Resource
|
||||
network *dockertest.Network
|
||||
|
||||
// optional config
|
||||
port int
|
||||
aclPolicy *headscale.ACLPolicy
|
||||
env []string
|
||||
tlsCert []byte
|
||||
tlsKey []byte
|
||||
}
|
||||
|
||||
type Option = func(c *HeadscaleInContainer)
|
||||
|
||||
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
// TODO(kradalby): Move somewhere appropriate
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
|
||||
|
||||
hsic.aclPolicy = acl
|
||||
}
|
||||
}
|
||||
|
||||
func WithTLS() Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
cert, key, err := createCertificate()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create certificates for headscale test: %s", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby): Move somewhere appropriate
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_CERT_PATH=%s", tlsCertPath))
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_KEY_PATH=%s", tlsKeyPath))
|
||||
|
||||
hsic.tlsCert = cert
|
||||
hsic.tlsKey = key
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigEnv(configEnv map[string]string) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
env := []string{}
|
||||
|
||||
for key, value := range configEnv {
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("%s=%s", key, value))
|
||||
env = append(env, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func WithPort(port int) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.port = port
|
||||
}
|
||||
}
|
||||
|
||||
func WithTestName(testName string) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
|
||||
hsic.hostname = hostname
|
||||
}
|
||||
}
|
||||
|
||||
func WithHostnameAsServerURL() Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.env = append(
|
||||
hsic.env,
|
||||
fmt.Sprintf("HEADSCALE_SERVER_URL=http://%s:%d",
|
||||
hsic.GetHostname(),
|
||||
hsic.port,
|
||||
))
|
||||
hsic.env = env
|
||||
}
|
||||
}
|
||||
|
||||
func New(
|
||||
pool *dockertest.Pool,
|
||||
port int,
|
||||
network *dockertest.Network,
|
||||
opts ...Option,
|
||||
) (*HeadscaleInContainer, error) {
|
||||
@@ -122,10 +71,11 @@ func New(
|
||||
}
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s", hash)
|
||||
portProto := fmt.Sprintf("%d/tcp", port)
|
||||
|
||||
hsic := &HeadscaleInContainer{
|
||||
hostname: hostname,
|
||||
port: headscaleDefaultPort,
|
||||
port: port,
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
@@ -135,9 +85,9 @@ func New(
|
||||
opt(hsic)
|
||||
}
|
||||
|
||||
log.Println("NAME: ", hsic.hostname)
|
||||
|
||||
portProto := fmt.Sprintf("%d/tcp", hsic.port)
|
||||
if hsic.aclPolicy != nil {
|
||||
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
@@ -145,7 +95,7 @@ func New(
|
||||
}
|
||||
|
||||
runOptions := &dockertest.RunOptions{
|
||||
Name: hsic.hostname,
|
||||
Name: hostname,
|
||||
ExposedPorts: []string{portProto},
|
||||
Networks: []*dockertest.Network{network},
|
||||
// Cmd: []string{"headscale", "serve"},
|
||||
@@ -158,7 +108,7 @@ func New(
|
||||
// dockertest isnt very good at handling containers that has already
|
||||
// been created, this is an attempt to make sure this container isnt
|
||||
// present.
|
||||
err = pool.RemoveContainerByName(hsic.hostname)
|
||||
err = pool.RemoveContainerByName(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -173,7 +123,7 @@ func New(
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not start headscale container: %w", err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hsic.hostname)
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
hsic.container = container
|
||||
|
||||
@@ -194,25 +144,9 @@ func New(
|
||||
}
|
||||
}
|
||||
|
||||
if hsic.hasTLS() {
|
||||
err = hsic.WriteFile(tlsCertPath, hsic.tlsCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
|
||||
}
|
||||
|
||||
err = hsic.WriteFile(tlsKeyPath, hsic.tlsKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS key to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return hsic, nil
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) hasTLS() bool {
|
||||
return len(t.tlsCert) != 0 && len(t.tlsKey) != 0
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) Shutdown() error {
|
||||
return t.pool.Purge(t.container)
|
||||
}
|
||||
@@ -220,6 +154,8 @@ func (t *HeadscaleInContainer) Shutdown() error {
|
||||
func (t *HeadscaleInContainer) Execute(
|
||||
command []string,
|
||||
) (string, error) {
|
||||
log.Println("command", command)
|
||||
log.Printf("running command for %s\n", t.hostname)
|
||||
stdout, stderr, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
@@ -228,13 +164,13 @@ func (t *HeadscaleInContainer) Execute(
|
||||
if err != nil {
|
||||
log.Printf("command stderr: %s\n", stderr)
|
||||
|
||||
if stdout != "" {
|
||||
log.Printf("command stdout: %s\n", stdout)
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
if stdout != "" {
|
||||
log.Printf("command stdout: %s\n", stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
@@ -247,7 +183,11 @@ func (t *HeadscaleInContainer) GetPort() string {
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
|
||||
return fmt.Sprintf("%s/health", t.GetEndpoint())
|
||||
hostEndpoint := fmt.Sprintf("%s:%d",
|
||||
t.GetIP(),
|
||||
t.port)
|
||||
|
||||
return fmt.Sprintf("http://%s/health", hostEndpoint)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetEndpoint() string {
|
||||
@@ -255,36 +195,16 @@ func (t *HeadscaleInContainer) GetEndpoint() string {
|
||||
t.GetIP(),
|
||||
t.port)
|
||||
|
||||
if t.hasTLS() {
|
||||
return fmt.Sprintf("https://%s", hostEndpoint)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("http://%s", hostEndpoint)
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetCert() []byte {
|
||||
return t.tlsCert
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) GetHostname() string {
|
||||
return t.hostname
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WaitForReady() error {
|
||||
url := t.GetHealthEndpoint()
|
||||
|
||||
log.Printf("waiting for headscale to be ready at %s", url)
|
||||
|
||||
client := &http.Client{}
|
||||
|
||||
if t.hasTLS() {
|
||||
insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint
|
||||
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint
|
||||
client = &http.Client{Transport: insecureTransport}
|
||||
}
|
||||
|
||||
return t.pool.Retry(func() error {
|
||||
resp, err := client.Get(url) //nolint
|
||||
resp, err := http.Get(url) //nolint
|
||||
if err != nil {
|
||||
return fmt.Errorf("headscale is not ready: %w", err)
|
||||
}
|
||||
@@ -372,87 +292,55 @@ func (t *HeadscaleInContainer) ListMachinesInNamespace(
|
||||
}
|
||||
|
||||
func (t *HeadscaleInContainer) WriteFile(path string, data []byte) error {
|
||||
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
|
||||
}
|
||||
dirPath, fileName := filepath.Split(path)
|
||||
|
||||
// nolint
|
||||
func createCertificate() ([]byte, []byte, error) {
|
||||
// From:
|
||||
// https://shaneutt.com/blog/golang-ca-and-signed-cert-go/
|
||||
file := bytes.NewReader(data)
|
||||
|
||||
ca := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(2019),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Headscale testing INC"},
|
||||
Country: []string{"NL"},
|
||||
Locality: []string{"Leiden"},
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(30 * time.Minute),
|
||||
IsCA: true,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageClientAuth,
|
||||
x509.ExtKeyUsageServerAuth,
|
||||
},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
|
||||
tarWriter := tar.NewWriter(buf)
|
||||
|
||||
header := &tar.Header{
|
||||
Name: fileName,
|
||||
Size: file.Size(),
|
||||
// Mode: int64(stat.Mode()),
|
||||
// ModTime: stat.ModTime(),
|
||||
}
|
||||
|
||||
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
err := tarWriter.WriteHeader(header)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return fmt.Errorf("failed write file header to tar: %w", err)
|
||||
}
|
||||
|
||||
cert := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1658),
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Headscale testing INC"},
|
||||
Country: []string{"NL"},
|
||||
Locality: []string{"Leiden"},
|
||||
},
|
||||
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(30 * time.Minute),
|
||||
SubjectKeyId: []byte{1, 2, 3, 4, 6},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
}
|
||||
|
||||
certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
_, err = io.Copy(tarWriter, file)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return fmt.Errorf("failed to copy file to tar: %w", err)
|
||||
}
|
||||
|
||||
certBytes, err := x509.CreateCertificate(
|
||||
rand.Reader,
|
||||
cert,
|
||||
ca,
|
||||
&certPrivKey.PublicKey,
|
||||
caPrivKey,
|
||||
err = tarWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close tar: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("tar: %s", buf.String())
|
||||
|
||||
// Ensure the directory is present inside the container
|
||||
_, err = t.Execute([]string{"mkdir", "-p", dirPath})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure directory: %w", err)
|
||||
}
|
||||
|
||||
err = t.pool.Client.UploadToContainer(
|
||||
t.container.Container.ID,
|
||||
docker.UploadToContainerOptions{
|
||||
NoOverwriteDirNonDir: false,
|
||||
Path: dirPath,
|
||||
InputStream: bytes.NewReader(buf.Bytes()),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
certPEM := new(bytes.Buffer)
|
||||
|
||||
err = pem.Encode(certPEM, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certBytes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
certPrivKeyPEM := new(bytes.Buffer)
|
||||
|
||||
err = pem.Encode(certPrivKeyPEM, &pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil
|
||||
return nil
|
||||
}
|
||||
|
@@ -1,74 +0,0 @@
|
||||
package integrationutil
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
)
|
||||
|
||||
func WriteFileToContainer(
|
||||
pool *dockertest.Pool,
|
||||
container *dockertest.Resource,
|
||||
path string,
|
||||
data []byte,
|
||||
) error {
|
||||
dirPath, fileName := filepath.Split(path)
|
||||
|
||||
file := bytes.NewReader(data)
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
|
||||
tarWriter := tar.NewWriter(buf)
|
||||
|
||||
header := &tar.Header{
|
||||
Name: fileName,
|
||||
Size: file.Size(),
|
||||
// Mode: int64(stat.Mode()),
|
||||
// ModTime: stat.ModTime(),
|
||||
}
|
||||
|
||||
err := tarWriter.WriteHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed write file header to tar: %w", err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(tarWriter, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy file to tar: %w", err)
|
||||
}
|
||||
|
||||
err = tarWriter.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close tar: %w", err)
|
||||
}
|
||||
|
||||
// Ensure the directory is present inside the container
|
||||
_, _, err = dockertestutil.ExecuteCommand(
|
||||
container,
|
||||
[]string{"mkdir", "-p", dirPath},
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure directory: %w", err)
|
||||
}
|
||||
|
||||
err = pool.Client.UploadToContainer(
|
||||
container.Container.ID,
|
||||
docker.UploadToContainerOptions{
|
||||
NoOverwriteDirNonDir: false,
|
||||
Path: dirPath,
|
||||
InputStream: bytes.NewReader(buf.Bytes()),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@@ -15,28 +15,22 @@ import (
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/puzpuzpuz/xsync/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
scenarioHashLength = 6
|
||||
maxWait = 60 * time.Second
|
||||
headscalePort = 8080
|
||||
)
|
||||
|
||||
var (
|
||||
errNoHeadscaleAvailable = errors.New("no headscale available")
|
||||
errNoNamespaceAvailable = errors.New("no namespace available")
|
||||
|
||||
// Tailscale started adding TS2021 support in CapabilityVersion>=28 (v1.24.0), but
|
||||
// proper support in Headscale was only added for CapabilityVersion>=39 clients (v1.30.0).
|
||||
tailscaleVersions2021 = []string{
|
||||
TailscaleVersions = []string{
|
||||
"head",
|
||||
"unstable",
|
||||
"1.32.1",
|
||||
"1.30.2",
|
||||
}
|
||||
|
||||
tailscaleVersions2019 = []string{
|
||||
"1.28.0",
|
||||
"1.26.2",
|
||||
"1.24.2",
|
||||
@@ -44,20 +38,13 @@ var (
|
||||
"1.20.4",
|
||||
"1.18.2",
|
||||
"1.16.2",
|
||||
|
||||
// These versions seem to fail when fetching from apt.
|
||||
// "1.14.6",
|
||||
// "1.12.4",
|
||||
// "1.10.2",
|
||||
// "1.8.7",
|
||||
}
|
||||
|
||||
// tailscaleVersionsUnavailable = []string{
|
||||
// // These versions seem to fail when fetching from apt.
|
||||
// "1.14.6",
|
||||
// "1.12.4",
|
||||
// "1.10.2",
|
||||
// "1.8.7",
|
||||
// }.
|
||||
|
||||
TailscaleVersions = append(
|
||||
tailscaleVersions2021,
|
||||
tailscaleVersions2019...,
|
||||
)
|
||||
)
|
||||
|
||||
type Namespace struct {
|
||||
@@ -72,14 +59,12 @@ type Namespace struct {
|
||||
type Scenario struct {
|
||||
// TODO(kradalby): support multiple headcales for later, currently only
|
||||
// use one.
|
||||
controlServers *xsync.MapOf[string, ControlServer]
|
||||
controlServers map[string]ControlServer
|
||||
|
||||
namespaces map[string]*Namespace
|
||||
|
||||
pool *dockertest.Pool
|
||||
network *dockertest.Network
|
||||
|
||||
headscaleLock sync.Mutex
|
||||
}
|
||||
|
||||
func NewScenario() (*Scenario, error) {
|
||||
@@ -114,7 +99,7 @@ func NewScenario() (*Scenario, error) {
|
||||
}
|
||||
|
||||
return &Scenario{
|
||||
controlServers: xsync.NewMapOf[ControlServer](),
|
||||
controlServers: make(map[string]ControlServer),
|
||||
namespaces: make(map[string]*Namespace),
|
||||
|
||||
pool: pool,
|
||||
@@ -123,17 +108,12 @@ func NewScenario() (*Scenario, error) {
|
||||
}
|
||||
|
||||
func (s *Scenario) Shutdown() error {
|
||||
s.controlServers.Range(func(_ string, control ControlServer) bool {
|
||||
for _, control := range s.controlServers {
|
||||
err := control.Shutdown()
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"Failed to shut down control: %s",
|
||||
fmt.Errorf("failed to tear down control: %w", err),
|
||||
)
|
||||
return fmt.Errorf("failed to tear down control: %w", err)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
for namespaceName, namespace := range s.namespaces {
|
||||
for _, client := range namespace.Clients {
|
||||
@@ -170,31 +150,36 @@ func (s *Scenario) Namespaces() []string {
|
||||
// Note: These functions assume that there is a _single_ headscale instance for now
|
||||
|
||||
// TODO(kradalby): make port and headscale configurable, multiple instances support?
|
||||
func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
|
||||
s.headscaleLock.Lock()
|
||||
defer s.headscaleLock.Unlock()
|
||||
|
||||
if headscale, ok := s.controlServers.Load("headscale"); ok {
|
||||
return headscale, nil
|
||||
}
|
||||
|
||||
headscale, err := hsic.New(s.pool, s.network, opts...)
|
||||
func (s *Scenario) StartHeadscale() error {
|
||||
headscale, err := hsic.New(s.pool, headscalePort, s.network,
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create headscale container: %w", err)
|
||||
return fmt.Errorf("failed to create headscale container: %w", err)
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed reach headscale container: %w", err)
|
||||
}
|
||||
s.controlServers["headscale"] = headscale
|
||||
|
||||
s.controlServers.Store("headscale", headscale)
|
||||
return nil
|
||||
}
|
||||
|
||||
return headscale, nil
|
||||
func (s *Scenario) Headscale() *hsic.HeadscaleInContainer {
|
||||
//nolint
|
||||
return s.controlServers["headscale"].(*hsic.HeadscaleInContainer)
|
||||
}
|
||||
|
||||
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
key, err := headscale.CreateAuthKey(namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create namespace: %w", err)
|
||||
@@ -207,7 +192,7 @@ func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
|
||||
}
|
||||
|
||||
func (s *Scenario) CreateNamespace(namespace string) error {
|
||||
if headscale, err := s.Headscale(); err == nil {
|
||||
if headscale, ok := s.controlServers["headscale"]; ok {
|
||||
err := headscale.CreateNamespace(namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create namespace: %w", err)
|
||||
@@ -229,7 +214,6 @@ func (s *Scenario) CreateTailscaleNodesInNamespace(
|
||||
namespaceStr string,
|
||||
requestedVersion string,
|
||||
count int,
|
||||
opts ...tsic.Option,
|
||||
) error {
|
||||
if namespace, ok := s.namespaces[namespaceStr]; ok {
|
||||
for i := 0; i < count; i++ {
|
||||
@@ -238,40 +222,16 @@ func (s *Scenario) CreateTailscaleNodesInNamespace(
|
||||
version = TailscaleVersions[i%len(TailscaleVersions)]
|
||||
}
|
||||
|
||||
headscale, err := s.Headscale()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tailscale node: %w", err)
|
||||
}
|
||||
|
||||
cert := headscale.GetCert()
|
||||
hostname := headscale.GetHostname()
|
||||
|
||||
namespace.createWaitGroup.Add(1)
|
||||
|
||||
opts = append(opts,
|
||||
tsic.WithHeadscaleTLS(cert),
|
||||
tsic.WithHeadscaleName(hostname),
|
||||
)
|
||||
|
||||
go func() {
|
||||
defer namespace.createWaitGroup.Done()
|
||||
|
||||
// TODO(kradalby): error handle this
|
||||
tsClient, err := tsic.New(
|
||||
s.pool,
|
||||
version,
|
||||
s.network,
|
||||
opts...,
|
||||
)
|
||||
tsClient, err := tsic.New(s.pool, version, s.network)
|
||||
if err != nil {
|
||||
// return fmt.Errorf("failed to add tailscale node: %w", err)
|
||||
log.Printf("failed to create tailscale node: %s", err)
|
||||
}
|
||||
|
||||
err = tsClient.WaitForReady()
|
||||
if err != nil {
|
||||
// return fmt.Errorf("failed to add tailscale node: %w", err)
|
||||
log.Printf("failed to wait for tailscaled: %s", err)
|
||||
log.Printf("failed to add tailscale node: %s", err)
|
||||
}
|
||||
|
||||
namespace.Clients[tsClient.Hostname()] = tsClient
|
||||
@@ -298,13 +258,7 @@ func (s *Scenario) RunTailscaleUp(
|
||||
// TODO(kradalby): error handle this
|
||||
_ = c.Up(loginServer, authKey)
|
||||
}(client)
|
||||
|
||||
err := client.WaitForReady()
|
||||
if err != nil {
|
||||
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
|
||||
namespace.joinWaitGroup.Wait()
|
||||
|
||||
return nil
|
||||
@@ -346,12 +300,13 @@ func (s *Scenario) WaitForTailscaleSync() error {
|
||||
// CreateHeadscaleEnv is a conventient method returning a set up Headcale
|
||||
// test environment with nodes of all versions, joined to the server with X
|
||||
// namespaces.
|
||||
func (s *Scenario) CreateHeadscaleEnv(
|
||||
namespaces map[string]int,
|
||||
tsOpts []tsic.Option,
|
||||
opts ...hsic.Option,
|
||||
) error {
|
||||
headscale, err := s.Headscale(opts...)
|
||||
func (s *Scenario) CreateHeadscaleEnv(namespaces map[string]int) error {
|
||||
err := s.StartHeadscale()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.Headscale().WaitForReady()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -362,7 +317,7 @@ func (s *Scenario) CreateHeadscaleEnv(
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, tsOpts...)
|
||||
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -372,7 +327,7 @@ func (s *Scenario) CreateHeadscaleEnv(
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
|
||||
err = s.RunTailscaleUp(namespaceName, s.Headscale().GetEndpoint(), key.GetKey())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
)
|
||||
|
||||
// This file is intended to "test the test framework", by proxy it will also test
|
||||
// This file is intendet to "test the test framework", by proxy it will also test
|
||||
// some Headcsale/Tailscale stuff, but mostly in very simple ways.
|
||||
|
||||
func IntegrationSkip(t *testing.T) {
|
||||
@@ -21,13 +21,8 @@ func IntegrationSkip(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestHeadscale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
@@ -39,12 +34,12 @@ func TestHeadscale(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("start-headscale", func(t *testing.T) {
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.StartHeadscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
err = headscale.WaitForReady()
|
||||
err = scenario.Headscale().WaitForReady()
|
||||
if err != nil {
|
||||
t.Errorf("headscale failed to become ready: %s", err)
|
||||
}
|
||||
@@ -74,13 +69,8 @@ func TestHeadscale(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestCreateTailscale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
namespace := "only-create-containers"
|
||||
|
||||
@@ -112,13 +102,8 @@ func TestCreateTailscale(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// If subtests are parallel, then they will start before setup is run.
|
||||
// This might mean we approach setup slightly wrong, but for now, ignore
|
||||
// the linter
|
||||
// nolint:tparallel
|
||||
func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
@@ -132,11 +117,12 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("start-headscale", func(t *testing.T) {
|
||||
headscale, err := scenario.Headscale()
|
||||
err = scenario.StartHeadscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
headscale := scenario.Headscale()
|
||||
err = headscale.WaitForReady()
|
||||
if err != nil {
|
||||
t.Errorf("headscale failed to become ready: %s", err)
|
||||
@@ -171,16 +157,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
|
||||
t.Errorf("failed to create preauthkey: %s", err)
|
||||
}
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create start headcale: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.RunTailscaleUp(
|
||||
namespace,
|
||||
headscale.GetEndpoint(),
|
||||
key.GetKey(),
|
||||
)
|
||||
err = scenario.RunTailscaleUp(namespace, scenario.Headscale().GetEndpoint(), key.GetKey())
|
||||
if err != nil {
|
||||
t.Errorf("failed to login: %s", err)
|
||||
}
|
||||
|
@@ -1,519 +0,0 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var retry = func(times int, sleepInterval time.Duration,
|
||||
doWork func() (string, string, error),
|
||||
) (string, string, error) {
|
||||
var result string
|
||||
var stderr string
|
||||
var err error
|
||||
|
||||
for attempts := 0; attempts < times; attempts++ {
|
||||
tempResult, tempStderr, err := doWork()
|
||||
|
||||
result += tempResult
|
||||
stderr += tempStderr
|
||||
|
||||
if err == nil {
|
||||
return result, stderr, nil
|
||||
}
|
||||
|
||||
// If we get a permission denied error, we can fail immediately
|
||||
// since that is something we wont recover from by retrying.
|
||||
if err != nil && strings.Contains(stderr, "Permission denied (tailscale)") {
|
||||
return result, stderr, err
|
||||
}
|
||||
|
||||
time.Sleep(sleepInterval)
|
||||
}
|
||||
|
||||
return result, stderr, err
|
||||
}
|
||||
|
||||
func TestSSHOneNamespaceAllToAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHMultipleNamespacesAllToAll(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
"namespace2": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1", "namespace2"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
nsOneClients, err := scenario.ListTailscaleClients("namespace1")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
nsTwoClients, err := scenario.ListTailscaleClients("namespace2")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
testInterNamespaceSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
|
||||
for _, client := range sourceClients {
|
||||
for _, peer := range targetClients {
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testInterNamespaceSSH(nsOneClients, nsTwoClients)
|
||||
testInterNamespaceSSH(nsTwoClients, nsOneClients)
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHNoSSHConfigured(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshnoneconfigured"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHPermissionDenied(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHIsBlockedInACL(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespace1": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:integration-test": {"namespace1"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:80"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:integration-test"},
|
||||
Destinations: []string{"group:integration-test"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshisblockedinacl"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
for _, peer := range allClients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHTimeout(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSSHNamespaceOnlyIsolation(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
scenario, err := NewScenario()
|
||||
if err != nil {
|
||||
t.Errorf("failed to create scenario: %s", err)
|
||||
}
|
||||
|
||||
spec := map[string]int{
|
||||
"namespaceacl1": len(TailscaleVersions) - 5,
|
||||
"namespaceacl2": len(TailscaleVersions) - 5,
|
||||
}
|
||||
|
||||
err = scenario.CreateHeadscaleEnv(spec,
|
||||
[]tsic.Option{tsic.WithSSH()},
|
||||
hsic.WithACLPolicy(
|
||||
&headscale.ACLPolicy{
|
||||
Groups: map[string][]string{
|
||||
"group:ssh1": {"namespaceacl1"},
|
||||
"group:ssh2": {"namespaceacl2"},
|
||||
},
|
||||
ACLs: []headscale.ACL{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"*"},
|
||||
Destinations: []string{"*:*"},
|
||||
},
|
||||
},
|
||||
SSHs: []headscale.SSH{
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:ssh1"},
|
||||
Destinations: []string{"group:ssh1"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
{
|
||||
Action: "accept",
|
||||
Sources: []string{"group:ssh2"},
|
||||
Destinations: []string{"group:ssh2"},
|
||||
Users: []string{"ssh-it-user"},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
hsic.WithTestName("sshtwonamespaceaclblock"),
|
||||
hsic.WithConfigEnv(map[string]string{
|
||||
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create headscale environment: %s", err)
|
||||
}
|
||||
|
||||
ssh1Clients, err := scenario.ListTailscaleClients("namespaceacl1")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
ssh2Clients, err := scenario.ListTailscaleClients("namespaceacl2")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get clients: %s", err)
|
||||
}
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
if err != nil {
|
||||
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
|
||||
}
|
||||
|
||||
_, err = scenario.ListTailscaleClientsFQDNs()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get FQDNs: %s", err)
|
||||
}
|
||||
|
||||
// TODO(kradalby,evenh): ACLs do currently not cover reject
// cases properly, and currently will accept all incoming connections
// as long as a rule is present.
|
||||
//
|
||||
// for _, client := range ssh1Clients {
|
||||
// for _, peer := range ssh2Clients {
|
||||
// if client.Hostname() == peer.Hostname() {
|
||||
// continue
|
||||
// }
|
||||
//
|
||||
// assertSSHPermissionDenied(t, client, peer)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// for _, client := range ssh2Clients {
|
||||
// for _, peer := range ssh1Clients {
|
||||
// if client.Hostname() == peer.Hostname() {
|
||||
// continue
|
||||
// }
|
||||
//
|
||||
// assertSSHPermissionDenied(t, client, peer)
|
||||
// }
|
||||
// }
|
||||
|
||||
for _, client := range ssh1Clients {
|
||||
for _, peer := range ssh1Clients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
for _, client := range ssh2Clients {
|
||||
for _, peer := range ssh2Clients {
|
||||
if client.Hostname() == peer.Hostname() {
|
||||
continue
|
||||
}
|
||||
|
||||
assertSSHHostname(t, client, peer)
|
||||
}
|
||||
}
|
||||
|
||||
err = scenario.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to tear down scenario: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
	t.Helper()

	peerFQDN, _ := peer.FQDN()

	command := []string{
		"ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
		fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
		"'hostname'",
	}

	return retry(10, 1*time.Second, func() (string, string, error) {
		return client.Execute(command)
	})
}

func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {
	t.Helper()

	result, _, err := doSSH(t, client, peer)
	assert.NoError(t, err)

	assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
}

func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
	t.Helper()

	result, stderr, err := doSSH(t, client, peer)
	assert.Error(t, err)

	assert.Empty(t, result)

	assert.Contains(t, stderr, "Permission denied (tailscale)")
}

func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
	t.Helper()

	result, stderr, err := doSSH(t, client, peer)
	assert.NoError(t, err)

	assert.Empty(t, result)

	assert.Contains(t, stderr, "Connection timed out")
}
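
// The doSSH/assert pattern above can be extended for other expectations.
// As an illustrative sketch only (assertSSHUser and the `whoami` command are
// hypothetical, not part of the suite), a helper that checks which remote
// user the SSH session was mapped to could look like this:
func assertSSHUser(t *testing.T, client TailscaleClient, peer TailscaleClient, user string) {
	t.Helper()

	peerFQDN, _ := peer.FQDN()

	command := []string{
		"ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
		fmt.Sprintf("%s@%s", user, peerFQDN),
		"'whoami'",
	}

	result, _, err := retry(10, 1*time.Second, func() (string, string, error) {
		return client.Execute(command)
	})
	assert.NoError(t, err)
	assert.Equal(t, user, strings.TrimSpace(result))
}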
|
@@ -7,7 +7,6 @@ import (
	"tailscale.com/ipn/ipnstate"
)

//nolint
type TailscaleClient interface {
	Hostname() string
	Shutdown() error
@@ -21,5 +20,4 @@ type TailscaleClient interface {
	WaitForReady() error
	WaitForPeers(expected int) error
	Ping(hostnameOrIP string) error
	ID() string
}
|
||||
|
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/integration/dockertestutil"
|
||||
"github.com/juanfont/headscale/integration/integrationutil"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
@@ -21,15 +20,13 @@ import (
|
||||
const (
|
||||
tsicHashLength = 6
|
||||
dockerContextPath = "../."
|
||||
headscaleCertPath = "/usr/local/share/ca-certificates/headscale.crt"
|
||||
)
|
||||
|
||||
var (
|
||||
errTailscalePingFailed = errors.New("ping failed")
|
||||
errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
|
||||
errTailscaleWrongPeerCount = errors.New("wrong peer count")
|
||||
errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
|
||||
errTailscaleNotConnected = errors.New("tailscale not connected")
|
||||
errTailscalePingFailed = errors.New("ping failed")
|
||||
errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
|
||||
errTailscaleWrongPeerCount = errors.New("wrong peer count")
|
||||
errTailscaleNotConnected = errors.New("tailscale not connected")
|
||||
)
|
||||
|
||||
type TailscaleInContainer struct {
|
||||
@@ -43,58 +40,12 @@ type TailscaleInContainer struct {
|
||||
// "cache"
|
||||
ips []netip.Addr
|
||||
fqdn string
|
||||
|
||||
// optional config
|
||||
headscaleCert []byte
|
||||
headscaleHostname string
|
||||
withSSH bool
|
||||
}
|
||||
|
||||
type Option = func(c *TailscaleInContainer)
|
||||
|
||||
func WithHeadscaleTLS(cert []byte) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.headscaleCert = cert
|
||||
}
|
||||
}
|
||||
|
||||
func WithOrCreateNetwork(network *dockertest.Network) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
if network != nil {
|
||||
tsic.network = network
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
network, err := dockertestutil.GetFirstOrCreateNetwork(
|
||||
tsic.pool,
|
||||
fmt.Sprintf("%s-network", tsic.hostname),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create network: %s", err)
|
||||
}
|
||||
|
||||
tsic.network = network
|
||||
}
|
||||
}
|
||||
|
||||
func WithHeadscaleName(hsName string) Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.headscaleHostname = hsName
|
||||
}
|
||||
}
|
||||
|
||||
func WithSSH() Option {
|
||||
return func(tsic *TailscaleInContainer) {
|
||||
tsic.withSSH = true
|
||||
}
|
||||
}
|
||||
|
||||
func New(
|
||||
pool *dockertest.Pool,
|
||||
version string,
|
||||
network *dockertest.Network,
|
||||
opts ...Option,
|
||||
) (*TailscaleInContainer, error) {
|
||||
hash, err := headscale.GenerateRandomStringDNSSafe(tsicHashLength)
|
||||
if err != nil {
|
||||
@@ -103,38 +54,20 @@ func New(
|
||||
|
||||
hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
|
||||
|
||||
tsic := &TailscaleInContainer{
|
||||
version: version,
|
||||
hostname: hostname,
|
||||
|
||||
pool: pool,
|
||||
network: network,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(tsic)
|
||||
}
|
||||
// TODO(kradalby): figure out why we need to "refresh" the network here.
|
||||
// network, err = dockertestutil.GetFirstOrCreateNetwork(pool, network.Network.Name)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{network},
|
||||
// Cmd: []string{
|
||||
// "tailscaled", "--tun=tsdev",
|
||||
// },
|
||||
Entrypoint: []string{
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"/bin/sleep 3 ; update-ca-certificates ; tailscaled --tun=tsdev",
|
||||
Cmd: []string{
|
||||
"tailscaled", "--tun=tsdev",
|
||||
},
|
||||
}
|
||||
|
||||
if tsic.headscaleHostname != "" {
|
||||
tailscaleOptions.ExtraHosts = []string{
|
||||
"host.docker.internal:host-gateway",
|
||||
fmt.Sprintf("%s:host-gateway", tsic.headscaleHostname),
|
||||
}
|
||||
}
|
||||
|
||||
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
|
||||
@@ -155,20 +88,14 @@ func New(
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
tsic.container = container
|
||||
return &TailscaleInContainer{
|
||||
version: version,
|
||||
hostname: hostname,
|
||||
|
||||
if tsic.hasTLS() {
|
||||
err = tsic.WriteFile(headscaleCertPath, tsic.headscaleCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tsic, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) hasTLS() bool {
|
||||
return len(t.headscaleCert) != 0
|
||||
pool: pool,
|
||||
container: container,
|
||||
network: network,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Shutdown() error {
|
||||
@@ -183,13 +110,11 @@ func (t *TailscaleInContainer) Version() string {
|
||||
return t.version
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) ID() string {
|
||||
return t.container.Container.ID
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) Execute(
|
||||
command []string,
|
||||
) (string, string, error) {
|
||||
log.Println("command", command)
|
||||
log.Printf("running command for %s\n", t.hostname)
|
||||
stdout, stderr, err := dockertestutil.ExecuteCommand(
|
||||
t.container,
|
||||
command,
|
||||
@@ -226,10 +151,6 @@ func (t *TailscaleInContainer) Up(
|
||||
t.hostname,
|
||||
}
|
||||
|
||||
if t.withSSH {
|
||||
command = append(command, "--ssh")
|
||||
}
|
||||
|
||||
if _, _, err := t.Execute(command); err != nil {
|
||||
return fmt.Errorf("failed to join tailscale client: %w", err)
|
||||
}
|
||||
@@ -249,10 +170,7 @@ func (t *TailscaleInContainer) UpWithLoginURL(
|
||||
t.hostname,
|
||||
}
|
||||
|
||||
_, stderr, err := t.Execute(command)
|
||||
if errors.Is(err, errTailscaleNotLoggedIn) {
|
||||
return nil, errTailscaleCannotUpWithoutAuthkey
|
||||
}
|
||||
_, stderr, _ := t.Execute(command)
|
||||
|
||||
urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
|
||||
urlStr = strings.TrimSpace(urlStr)
|
||||
@@ -396,10 +314,6 @@ func (t *TailscaleInContainer) Ping(hostnameOrIP string) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (t *TailscaleInContainer) WriteFile(path string, data []byte) error {
|
||||
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
|
||||
}
|
||||
|
||||
func createTailscaleBuildOptions(version string) *dockertest.BuildOptions {
|
||||
var tailscaleBuildOptions *dockertest.BuildOptions
|
||||
switch version {
|
||||
|
integration_oidc_test.go (new file, 558 lines)
@@ -0,0 +1,558 @@
|
||||
// nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
const (
|
||||
oidcHeadscaleHostname = "headscale-oidc"
|
||||
oidcMockHostname = "headscale-mock-oidc"
|
||||
oidcNamespaceName = "oidcnamespace"
|
||||
totalOidcContainers = 3
|
||||
)
|
||||
|
||||
type IntegrationOIDCTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
network dockertest.Network
|
||||
headscale dockertest.Resource
|
||||
mockOidc dockertest.Resource
|
||||
saveLogs bool
|
||||
|
||||
tailscales map[string]dockertest.Resource
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestIntegrationOIDCTestSuite(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration tests due to short flag")
|
||||
}
|
||||
|
||||
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
|
||||
if err != nil {
|
||||
saveLogs = false
|
||||
}
|
||||
|
||||
s := new(IntegrationOIDCTestSuite)
|
||||
|
||||
s.tailscales = make(map[string]dockertest.Resource)
|
||||
s.saveLogs = saveLogs
|
||||
|
||||
suite.Run(t, s)
|
||||
|
||||
// HandleStats, which allows us to check if we passed and save logs,
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
|
||||
if s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.mockOidc); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
t.Logf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) SetupSuite() {
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
s.pool = *ppool
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
|
||||
}
|
||||
|
||||
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
|
||||
}
|
||||
s.network = network
|
||||
|
||||
log.Printf("Network config: %v", s.network.Network.IPAM.Config[0])
|
||||
|
||||
s.Suite.T().Log("Setting up mock OIDC")
|
||||
mockOidcOptions := &dockertest.RunOptions{
|
||||
Name: oidcMockHostname,
|
||||
Cmd: []string{"headscale", "mockoidc"},
|
||||
ExposedPorts: []string{"10000/tcp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"10000/tcp": {{HostPort: "10000"}},
|
||||
},
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Env: []string{
|
||||
fmt.Sprintf("MOCKOIDC_ADDR=%s", oidcMockHostname),
|
||||
"MOCKOIDC_PORT=10000",
|
||||
"MOCKOIDC_CLIENT_ID=superclient",
|
||||
"MOCKOIDC_CLIENT_SECRET=supersecret",
|
||||
},
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.debug",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(oidcMockHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
mockOidcOptions,
|
||||
DockerRestartPolicy); err == nil {
|
||||
s.mockOidc = *pmockoidc
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not start mockOIDC container: %s", err), "")
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Waiting for headscale mock oidc to be ready for tests")
|
||||
hostEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.mockOidc.GetIPInNetwork(&s.network),
|
||||
s.mockOidc.GetPort("10000/tcp"),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Printf("headscale mock OIDC tests is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
s.Suite.T().Log("headscale-mock-oidc container is ready for embedded OIDC tests")
|
||||
|
||||
oidcCfg := fmt.Sprintf(`
|
||||
oidc:
|
||||
issuer: http://%s:10000/oidc
|
||||
client_id: superclient
|
||||
client_secret: supersecret
|
||||
strip_email_domain: true`, s.mockOidc.GetIPInNetwork(&s.network))
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
|
||||
}
|
||||
|
||||
baseConfig, err := os.ReadFile(
|
||||
path.Join(currentPath, "integration_test/etc_oidc/base_config.yaml"))
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not read base config: %s", err), "")
|
||||
}
|
||||
config := string(baseConfig) + oidcCfg
|
||||
|
||||
log.Println(config)
|
||||
|
||||
configPath := path.Join(currentPath, "integration_test/etc_oidc/config.yaml")
|
||||
err = os.WriteFile(configPath, []byte(config), 0o644)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not write config: %s", err), "")
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: oidcHeadscaleHostname,
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Mounts: []string{
|
||||
path.Join(currentPath,
|
||||
"integration_test/etc_oidc:/etc/headscale",
|
||||
),
|
||||
},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
ExposedPorts: []string{"8443/tcp", "3478/udp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8443/tcp": {{HostPort: "8443"}},
|
||||
"3478/udp": {{HostPort: "3478"}},
|
||||
},
|
||||
}
|
||||
|
||||
err = s.pool.RemoveContainerByName(oidcHeadscaleHostname)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not remove existing container before building test: %s",
|
||||
err,
|
||||
),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Creating headscale container for OIDC integration tests")
|
||||
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
|
||||
s.headscale = *pheadscale
|
||||
} else {
|
||||
s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
|
||||
}
|
||||
s.Suite.T().Logf("Created headscale container for embedded OIDC tests")
|
||||
|
||||
s.Suite.T().Logf("Creating tailscale containers for embedded OIDC tests")
|
||||
|
||||
for i := 0; i < totalOidcContainers; i++ {
|
||||
version := tailscaleVersions[i%len(tailscaleVersions)]
|
||||
hostname, container := s.tailscaleContainer(
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
)
|
||||
s.tailscales[hostname] = *container
|
||||
}
|
||||
|
||||
s.Suite.T().Logf("Waiting for headscale to be ready for embedded OIDC tests")
|
||||
hostMockEndpoint := fmt.Sprintf(
|
||||
"%s:%s",
|
||||
s.headscale.GetIPInNetwork(&s.network),
|
||||
s.headscale.GetPort("8443/tcp"),
|
||||
)
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostMockEndpoint)
|
||||
insecureTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
log.Printf("headscale for embedded OIDC tests is not ready: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
s.Suite.T().Log("headscale container is ready for embedded OIDC tests")
|
||||
|
||||
s.Suite.T().Logf("Creating headscale namespace: %s\n", oidcNamespaceName)
|
||||
result, _, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", oidcNamespaceName},
|
||||
[]string{},
|
||||
)
|
||||
log.Println("headscale create namespace result: ", result)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
headscaleEndpoint := fmt.Sprintf(
|
||||
"https://headscale:%s",
|
||||
s.headscale.GetPort("8443/tcp"),
|
||||
)
|
||||
|
||||
log.Printf(
|
||||
"Joining tailscale containers to headscale at %s\n",
|
||||
headscaleEndpoint,
|
||||
)
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
s.joinWaitGroup.Add(1)
|
||||
go s.AuthenticateOIDC(headscaleEndpoint, hostname, tailscale)
|
||||
|
||||
// TODO(juan): Workaround for https://github.com/juanfont/headscale/issues/814
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
s.joinWaitGroup.Wait()
|
||||
|
||||
// The nodes need a bit of time to get their updated maps from headscale
|
||||
// TODO: See if we can have a more deterministic wait here.
|
||||
time.Sleep(60 * time.Second)
|
||||
}
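
// A sketch of a more deterministic wait for the TODO above: instead of a
// fixed 60 second sleep, poll `tailscale status` on a container until the
// expected number of lines shows up. waitForPeers and its line-count
// heuristic are illustrative assumptions, not part of this suite.
func waitForPeers(tailscale dockertest.Resource, expected int, maxWait time.Duration) error {
	deadline := time.Now().Add(maxWait)

	for time.Now().Before(deadline) {
		stdout, _, err := ExecuteCommand(
			&tailscale,
			[]string{"tailscale", "status"},
			[]string{},
		)
		if err == nil {
			// `tailscale status` prints the local node first, then one line
			// per peer, so expected peers means expected+1 non-empty lines.
			lines := 0
			for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
				if line != "" {
					lines++
				}
			}

			if lines >= expected+1 {
				return nil
			}
		}

		time.Sleep(2 * time.Second)
	}

	return fmt.Errorf("timed out waiting for %d peers", expected)
}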
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) AuthenticateOIDC(
|
||||
endpoint, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) {
|
||||
defer s.joinWaitGroup.Done()
|
||||
|
||||
loginURL, err := s.joinOIDC(endpoint, hostname, tailscale)
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not join OIDC node: %s", err), "")
|
||||
}
|
||||
|
||||
insecureTransport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(loginURL.String())
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("auth body, err: %#v, %s", resp, err)
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
if err != nil {
|
||||
s.FailNow(fmt.Sprintf("Could not read login page: %s", err), "")
|
||||
}
|
||||
|
||||
log.Printf("Login page for %s: %s", hostname, string(body))
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) joinOIDC(
|
||||
endpoint, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) (*url.URL, error) {
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
endpoint,
|
||||
"--hostname",
|
||||
hostname,
|
||||
}
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, stderr, _ := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
|
||||
// This piece of code just gets the login URL out of the stderr of the tailscale client.
|
||||
// See https://github.com/tailscale/tailscale/blob/main/cmd/tailscale/cli/up.go#L584.
|
||||
urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
|
||||
urlStr = strings.TrimSpace(urlStr)
|
||||
|
||||
// parse URL
|
||||
loginUrl, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Printf("Could not parse login URL: %s", err)
|
||||
log.Printf("Original join command result: %s", stderr)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return loginUrl, nil
|
||||
}
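
// Because joinOIDC scrapes the login URL out of free-form stderr, it can be
// useful to pin the extraction down with a small unit-style check. This is an
// illustrative sketch only; the test name and the sample output below are
// assumptions based on the format referenced in the tailscale up source above.
func TestExtractLoginURLFromStderr(t *testing.T) {
	stderr := "\nTo authenticate, visit:\n\n\thttps://headscale-oidc:8443/oidc/register/nodekey:abc123\n"

	urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
	urlStr = strings.TrimSpace(urlStr)

	loginURL, err := url.Parse(urlStr)
	assert.Nil(t, err)
	assert.Equal(t, "/oidc/register/nodekey:abc123", loginURL.Path)
}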
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) tailscaleContainer(
|
||||
identifier, version string,
|
||||
) (string, *dockertest.Resource) {
|
||||
tailscaleBuildOptions := getDockerBuildOptions(version)
|
||||
|
||||
hostname := fmt.Sprintf(
|
||||
"tailscale-%s-%s",
|
||||
strings.Replace(version, ".", "-", -1),
|
||||
identifier,
|
||||
)
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{&s.network},
|
||||
Cmd: []string{
|
||||
"tailscaled", "--tun=tsdev",
|
||||
},
|
||||
|
||||
// expose the host IP address, so we can access it from inside the container
|
||||
ExtraHosts: []string{
|
||||
"host.docker.internal:host-gateway",
|
||||
"headscale:host-gateway",
|
||||
},
|
||||
}
|
||||
|
||||
pts, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
tailscaleBuildOptions,
|
||||
tailscaleOptions,
|
||||
DockerRestartPolicy,
|
||||
DockerAllowLocalIPv6,
|
||||
DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
s.FailNow(
|
||||
fmt.Sprintf(
|
||||
"Could not start tailscale container version %s: %s",
|
||||
version,
|
||||
err,
|
||||
),
|
||||
)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
return hostname, pts
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) TearDownSuite() {
|
||||
if !s.saveLogs {
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.pool.Purge(&s.mockOidc); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
if err := s.network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) HandleStats(
|
||||
suiteName string,
|
||||
stats *suite.SuiteInformation,
|
||||
) {
|
||||
s.stats = stats
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) saveLog(
|
||||
resource *dockertest.Resource,
|
||||
basePath string,
|
||||
) error {
|
||||
err := os.MkdirAll(basePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
err = s.pool.Client.Logs(
|
||||
docker.LogsOptions{
|
||||
Context: context.TODO(),
|
||||
Container: resource.Container.ID,
|
||||
OutputStream: &stdout,
|
||||
ErrorStream: &stderr,
|
||||
Tail: "all",
|
||||
RawTerminal: false,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
Follow: false,
|
||||
Timestamps: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stdout.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stderr.log"),
|
||||
[]byte(stderr.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IntegrationOIDCTestSuite) TestPingAllPeersByAddress() {
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
ips, err := getIPs(s.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
for peername, peerIPs := range ips {
|
||||
for i, ip := range peerIPs {
|
||||
// We currently can't ping ourselves, so skip that.
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().
|
||||
Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
|
||||
// We are only interested in a "direct ping", which means we
// might need a couple more attempts before reaching the node.
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=1s",
|
||||
"--c=10",
|
||||
"--until-direct=true",
|
||||
ip.String(),
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Pinging from %s to %s (%s)\n",
|
||||
hostname,
|
||||
peername,
|
||||
ip,
|
||||
)
|
||||
stdout, stderr, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf(
|
||||
"result for %s: stdout: %s, stderr: %s\n",
|
||||
hostname,
|
||||
stdout,
|
||||
stderr,
|
||||
)
|
||||
assert.Contains(t, stdout, "pong")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -46,6 +46,7 @@ private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:18080
|
||||
tls_client_auth_mode: relaxed
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
|
@@ -45,6 +45,7 @@ private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:18080
|
||||
tls_client_auth_mode: relaxed
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
|
@@ -46,6 +46,7 @@ private_key_path: private.key
|
||||
noise:
|
||||
private_key_path: noise_private.key
|
||||
server_url: http://headscale:8080
|
||||
tls_client_auth_mode: relaxed
|
||||
tls_letsencrypt_cache_dir: /var/www/.cache
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
unix_socket: /var/run/headscale.sock
|
||||
|
@@ -14,6 +14,7 @@ listen_addr: 0.0.0.0:8443
|
||||
server_url: https://headscale-oidc:8443
|
||||
tls_cert_path: "/etc/headscale/tls/server.crt"
|
||||
tls_key_path: "/etc/headscale/tls/server.key"
|
||||
tls_client_auth_mode: disabled
|
||||
derp:
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
|
@@ -744,11 +744,7 @@ func (machine Machine) toNode(
|
||||
|
||||
KeepAlive: true,
|
||||
MachineAuthorized: !machine.isExpired(),
|
||||
Capabilities: []string{
|
||||
tailcfg.CapabilityFileSharing,
|
||||
tailcfg.CapabilityAdmin,
|
||||
tailcfg.CapabilitySSH,
|
||||
},
|
||||
Capabilities: []string{tailcfg.CapabilityFileSharing},
|
||||
}
|
||||
|
||||
return &node, nil
|
||||
|
@@ -211,10 +211,7 @@ func (n *Namespace) toLogin() *tailcfg.Login {
|
||||
return &login
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponseUserProfiles(
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
) []tailcfg.UserProfile {
|
||||
func getMapResponseUserProfiles(machine Machine, peers Machines) []tailcfg.UserProfile {
|
||||
namespaceMap := make(map[string]Namespace)
|
||||
namespaceMap[machine.Namespace.Name] = machine.Namespace
|
||||
for _, peer := range peers {
|
||||
@@ -223,17 +220,11 @@ func (h *Headscale) getMapResponseUserProfiles(
|
||||
|
||||
profiles := []tailcfg.UserProfile{}
|
||||
for _, namespace := range namespaceMap {
|
||||
displayName := namespace.Name
|
||||
|
||||
if h.cfg.BaseDomain != "" {
|
||||
displayName = fmt.Sprintf("%s@%s", namespace.Name, h.cfg.BaseDomain)
|
||||
}
|
||||
|
||||
profiles = append(profiles,
|
||||
tailcfg.UserProfile{
|
||||
ID: tailcfg.UserID(namespace.ID),
|
||||
LoginName: namespace.Name,
|
||||
DisplayName: displayName,
|
||||
DisplayName: namespace.Name,
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -209,7 +209,7 @@ func (s *Suite) TestGetMapResponseUserProfiles(c *check.C) {
|
||||
peersOfMachine1InShared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
userProfiles := app.getMapResponseUserProfiles(
|
||||
userProfiles := getMapResponseUserProfiles(
|
||||
*machineInShared1,
|
||||
peersOfMachine1InShared1,
|
||||
)
|
||||
|
oidc.go
@@ -76,55 +76,20 @@ func (h *Headscale) RegisterOIDC(
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
nodeKeyStr, ok := vars["nkey"]
|
||||
if !ok || nodeKeyStr == "" {
|
||||
log.Error().
|
||||
Caller().
|
||||
Msg("Missing node key in URL")
|
||||
http.Error(writer, "Missing node key in URL", http.StatusBadRequest)
|
||||
|
||||
log.Debug().
|
||||
return
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("node_key", nodeKeyStr).
|
||||
Bool("ok", ok).
|
||||
Msg("Received oidc register call")
|
||||
|
||||
if !NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
|
||||
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// We need to make sure we don't open for XSS style injections: if the parameter that
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error.
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(NodePublicKeyEnsurePrefix(nodeKeyStr)),
|
||||
)
|
||||
|
||||
if !ok || nodeKeyStr == "" || err != nil {
|
||||
log.Warn().
|
||||
Err(err).
|
||||
Msg("Failed to parse incoming nodekey in OIDC registration")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("Wrong params"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
randomBlob := make([]byte, randomByteSize)
|
||||
if _, err := rand.Read(randomBlob); err != nil {
|
||||
log.Error().
|
||||
@@ -138,7 +103,7 @@ func (h *Headscale) RegisterOIDC(
|
||||
stateStr := hex.EncodeToString(randomBlob)[:32]
|
||||
|
||||
// place the node key into the state cache, so it can be retrieved later
|
||||
h.registrationCache.Set(stateStr, NodePublicKeyStripPrefix(nodeKey), registerCacheExpiration)
|
||||
h.registrationCache.Set(stateStr, nodeKeyStr, registerCacheExpiration)
|
||||
|
||||
// Add any extra parameter provided in the configuration to the Authorize Endpoint request
|
||||
extras := make([]oauth2.AuthCodeOption, 0, len(h.cfg.OIDC.ExtraParams))
|
||||
@@ -440,8 +405,8 @@ func (h *Headscale) validateMachineForOIDCCallback(
|
||||
claims *IDTokenClaims,
|
||||
) (*key.NodePublic, bool, error) {
|
||||
// retrieve machinekey from state cache
|
||||
nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
|
||||
if !nodeKeyFound {
|
||||
machineKeyIf, machineKeyFound := h.registrationCache.Get(state)
|
||||
if !machineKeyFound {
|
||||
log.Error().
|
||||
Msg("requested machine state key expired before authorisation completed")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
@@ -454,38 +419,20 @@ func (h *Headscale) validateMachineForOIDCCallback(
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return nil, false, errOIDCNodeKeyMissing
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
nodeKeyFromCache, nodeKeyOK := nodeKeyIf.(string)
|
||||
if !nodeKeyOK {
|
||||
log.Error().
|
||||
Msg("requested machine state key is not a string")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("state is invalid"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return nil, false, errOIDCInvalidMachineState
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
nodeKeyFromCache, nodeKeyOK := machineKeyIf.(string)
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(NodePublicKeyEnsurePrefix(nodeKeyFromCache)),
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("nodeKey", nodeKeyFromCache).
|
||||
Bool("nodeKeyOK", nodeKeyOK).
|
||||
Msg("could not parse node public key")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, werr := writer.Write([]byte("could not parse node public key"))
|
||||
_, werr := writer.Write([]byte("could not parse public key"))
|
||||
if werr != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -496,6 +443,21 @@ func (h *Headscale) validateMachineForOIDCCallback(
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if !nodeKeyOK {
|
||||
log.Error().Msg("could not get node key from cache")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("could not get node key from cache"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return nil, false, errOIDCNodeKeyMissing
|
||||
}
|
||||
|
||||
// retrieve machine information if it exists
// The error is not important, because if it does not
// exist, then this is a new machine and we will move
|
||||
|
@@ -220,8 +220,8 @@ func (h *Headscale) ApplePlatformConfig(
|
||||
}
|
||||
|
||||
switch platform {
|
||||
case "macos-standalone":
|
||||
if err := macosStandaloneTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
case "macos-standlone":
|
||||
if err := macosStandloneTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
handleMacError(err)
|
||||
|
||||
return
|
||||
@@ -390,7 +390,7 @@ var macosAppStoreTemplate = template.Must(template.New("macosTemplate").Parse(`
|
||||
</dict>
|
||||
`))
|
||||
|
||||
var macosStandaloneTemplate = template.Must(template.New("macosStandaloneTemplate").Parse(`
|
||||
var macosStandloneTemplate = template.Must(template.New("macosStandloneTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.macsys</string>
|
||||
|
@@ -490,7 +490,6 @@ func (h *Headscale) handleNewMachineCommon(
|
||||
Bool("noise", machineKey.IsZero()).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("The node seems to be new, sending auth url")
|
||||
|
||||
if h.oauth2Config != nil {
|
||||
resp.AuthURL = fmt.Sprintf(
|
||||
"%s/oidc/register/%s",
|
||||
@@ -529,7 +528,6 @@ func (h *Headscale) handleNewMachineCommon(
|
||||
log.Info().
|
||||
Caller().
|
||||
Bool("noise", machineKey.IsZero()).
|
||||
Str("AuthURL", resp.AuthURL).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Successfully sent auth url")
|
||||
}
|
||||
@@ -728,7 +726,7 @@ func (h *Headscale) handleMachineExpiredCommon(
|
||||
if h.oauth2Config != nil {
|
||||
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
registerRequest.NodeKey)
|
||||
NodePublicKeyStripPrefix(registerRequest.NodeKey))
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
|
@@ -92,7 +92,7 @@
|
||||
<code>defaults write io.tailscale.ipn.macos ControlURL {{.URL}}</code>
|
||||
</li>
|
||||
<li>
|
||||
for standalone client:
|
||||
for standlone client:
|
||||
<code>defaults write io.tailscale.ipn.macsys ControlURL {{.URL}}</code>
|
||||
</li>
|
||||
</ul>
|
||||
|